diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def --- a/clang/include/clang/Basic/BuiltinsRISCV.def +++ b/clang/include/clang/Basic/BuiltinsRISCV.def @@ -1,183 +1,3 @@ -#if defined(BUILTIN) && !defined(RISCVV_BUILTIN) -#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS) -#endif -RISCVV_BUILTIN(vadd_vv_i8m1_vl, "q8Scq8Scq8Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m1_m_vl, "q8Scq8bq8Scq8Scq8Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m1_vl, "q4Ssq4Ssq4Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m1_m_vl, "q4Ssq4bq4Ssq4Ssq4Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m1_m_vl, "q2Siq2bq2Siq2Siq2Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m1_vl, "q1SWiq1SWiq1SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiq1SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m2_vl, "q16Scq16Scq16Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m2_m_vl, "q16Scq16bq16Scq16Scq16Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m2_vl, "q8Ssq8Ssq8Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m2_m_vl, "q8Ssq8bq8Ssq8Ssq8Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m2_vl, "q4Siq4Siq4Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m2_m_vl, "q4Siq4bq4Siq4Siq4Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m2_vl, "q2SWiq2SWiq2SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiq2SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m4_vl, "q32Scq32Scq32Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m4_m_vl, "q32Scq32bq32Scq32Scq32Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m4_vl, "q16Ssq16Ssq16Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m4_m_vl, "q16Ssq16bq16Ssq16Ssq16Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m4_vl, "q8Siq8Siq8Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m4_m_vl, "q8Siq8bq8Siq8Siq8Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m4_vl, "q4SWiq4SWiq4SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiq4SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m8_vl, "q64Scq64Scq64Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m8_m_vl, "q64Scq64bq64Scq64Scq64Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m8_vl, "q32Ssq32Ssq32Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m8_m_vl, "q32Ssq32bq32Ssq32Ssq32Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m8_vl, "q16Siq16Siq16Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m8_m_vl, "q16Siq16bq16Siq16Siq16Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m8_vl, "q8SWiq8SWiq8SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiq8SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf2_vl, "q4Scq4Scq4Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf2_m_vl, "q4Scq4bq4Scq4Scq4Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf2_vl, "q2Ssq2Ssq2Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf2_m_vl, "q2Ssq2bq2Ssq2Ssq2Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32mf2_vl, "q1Siq1Siq1Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32mf2_m_vl, "q1Siq1bq1Siq1Siq1Siz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf4_vl, "q2Scq2Scq2Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf4_m_vl, "q2Scq2bq2Scq2Scq2Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf4_vl, "q1Ssq1Ssq1Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf4_m_vl, "q1Ssq1bq1Ssq1Ssq1Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf8_vl, "q1Scq1Scq1Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf8_m_vl, "q1Scq1bq1Scq1Scq1Scz", "n") -RISCVV_BUILTIN(vadd_vx_i8m1_vl, "q8Scq8ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m1_m_vl, "q8Scq8bq8Scq8ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m1_vl, "q4Ssq4SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m1_m_vl, "q4Ssq4bq4Ssq4SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m1_vl, "q2Siq2SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m1_m_vl, "q2Siq2bq2Siq2SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m1_vl, "q1SWiq1SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiSWiz", "n") 
-RISCVV_BUILTIN(vadd_vx_i8m2_vl, "q16Scq16ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m2_m_vl, "q16Scq16bq16Scq16ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m2_vl, "q8Ssq8SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m2_m_vl, "q8Ssq8bq8Ssq8SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m2_vl, "q4Siq4SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m2_m_vl, "q4Siq4bq4Siq4SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m2_vl, "q2SWiq2SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8m4_vl, "q32Scq32ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m4_m_vl, "q32Scq32bq32Scq32ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m4_vl, "q16Ssq16SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m4_m_vl, "q16Ssq16bq16Ssq16SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m4_vl, "q8Siq8SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m4_m_vl, "q8Siq8bq8Siq8SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m4_vl, "q4SWiq4SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8m8_vl, "q64Scq64ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m8_m_vl, "q64Scq64bq64Scq64ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m8_vl, "q32Ssq32SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m8_m_vl, "q32Ssq32bq32Ssq32SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m8_vl, "q16Siq16SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m8_m_vl, "q16Siq16bq16Siq16SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m8_vl, "q8SWiq8SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf2_vl, "q4Scq4ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf2_m_vl, "q4Scq4bq4Scq4ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf2_vl, "q2Ssq2SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf2_m_vl, "q2Ssq2bq2Ssq2SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32mf2_vl, "q1Siq1SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32mf2_m_vl, "q1Siq1bq1Siq1SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf4_vl, "q2Scq2ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf4_m_vl, "q2Scq2bq2Scq2ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf4_vl, "q1Ssq1SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf4_m_vl, "q1Ssq1bq1Ssq1SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf8_vl, "q1Scq1ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf8_m_vl, "q1Scq1bq1Scq1ScScz", "n") -RISCVV_BUILTIN(vadd_vv_u8m1_vl, "q8Ucq8Ucq8Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m1_m_vl, "q8Ucq8bq8Ucq8Ucq8Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m1_vl, "q4Usq4Usq4Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m1_m_vl, "q4Usq4bq4Usq4Usq4Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m1_vl, "q2Uiq2Uiq2Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m1_m_vl, "q2Uiq2bq2Uiq2Uiq2Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m1_vl, "q1UWiq1UWiq1UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiq1UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m2_vl, "q16Ucq16Ucq16Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m2_m_vl, "q16Ucq16bq16Ucq16Ucq16Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m2_vl, "q8Usq8Usq8Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m2_m_vl, "q8Usq8bq8Usq8Usq8Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m2_vl, "q4Uiq4Uiq4Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m2_m_vl, "q4Uiq4bq4Uiq4Uiq4Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m2_vl, "q2UWiq2UWiq2UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiq2UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m4_vl, "q32Ucq32Ucq32Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m4_m_vl, "q32Ucq32bq32Ucq32Ucq32Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m4_vl, "q16Usq16Usq16Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m4_m_vl, "q16Usq16bq16Usq16Usq16Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m4_vl, "q8Uiq8Uiq8Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m4_m_vl, "q8Uiq8bq8Uiq8Uiq8Uiz", "n") 
-RISCVV_BUILTIN(vadd_vv_u64m4_vl, "q4UWiq4UWiq4UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiq4UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m8_vl, "q64Ucq64Ucq64Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m8_m_vl, "q64Ucq64bq64Ucq64Ucq64Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m8_vl, "q32Usq32Usq32Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m8_m_vl, "q32Usq32bq32Usq32Usq32Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m8_vl, "q16Uiq16Uiq16Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m8_m_vl, "q16Uiq16bq16Uiq16Uiq16Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m8_vl, "q8UWiq8UWiq8UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiq8UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf2_vl, "q4Ucq4Ucq4Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf2_m_vl, "q4Ucq4bq4Ucq4Ucq4Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf2_vl, "q2Usq2Usq2Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf2_m_vl, "q2Usq2bq2Usq2Usq2Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32mf2_vl, "q1Uiq1Uiq1Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32mf2_m_vl, "q1Uiq1bq1Uiq1Uiq1Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf4_vl, "q2Ucq2Ucq2Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf4_m_vl, "q2Ucq2bq2Ucq2Ucq2Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf4_vl, "q1Usq1Usq1Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf4_m_vl, "q1Usq1bq1Usq1Usq1Usz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf8_vl, "q1Ucq1Ucq1Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf8_m_vl, "q1Ucq1bq1Ucq1Ucq1Ucz", "n") -RISCVV_BUILTIN(vadd_vx_u8m1_vl, "q8Ucq8UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m1_m_vl, "q8Ucq8bq8Ucq8UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m1_vl, "q4Usq4UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m1_m_vl, "q4Usq4bq4Usq4UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m1_vl, "q2Uiq2UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m1_m_vl, "q2Uiq2bq2Uiq2UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m1_vl, "q1UWiq1UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m2_vl, "q16Ucq16UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m2_m_vl, "q16Ucq16bq16Ucq16UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m2_vl, "q8Usq8UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m2_m_vl, "q8Usq8bq8Usq8UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m2_vl, "q4Uiq4UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m2_m_vl, "q4Uiq4bq4Uiq4UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m2_vl, "q2UWiq2UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m4_vl, "q32Ucq32UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m4_m_vl, "q32Ucq32bq32Ucq32UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m4_vl, "q16Usq16UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m4_m_vl, "q16Usq16bq16Usq16UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m4_vl, "q8Uiq8UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m4_m_vl, "q8Uiq8bq8Uiq8UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m4_vl, "q4UWiq4UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m8_vl, "q64Ucq64UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m8_m_vl, "q64Ucq64bq64Ucq64UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m8_vl, "q32Usq32UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m8_m_vl, "q32Usq32bq32Usq32UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m8_vl, "q16Uiq16UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m8_m_vl, "q16Uiq16bq16Uiq16UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m8_vl, "q8UWiq8UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf2_vl, "q4Ucq4UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf2_m_vl, "q4Ucq4bq4Ucq4UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf2_vl, "q2Usq2UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf2_m_vl, "q2Usq2bq2Usq2UsUsz", "n") 
-RISCVV_BUILTIN(vadd_vx_u32mf2_vl, "q1Uiq1UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32mf2_m_vl, "q1Uiq1bq1Uiq1UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf4_vl, "q2Ucq2UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf4_m_vl, "q2Ucq2bq2Ucq2UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf4_vl, "q1Usq1UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf4_m_vl, "q1Usq1bq1Usq1UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf8_vl, "q1Ucq1UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf8_m_vl, "q1Ucq1bq1Ucq1UcUcz", "n") +#include "clang/Basic/riscv_vector_builtins.inc" -#undef BUILTIN -#undef RISCVV_BUILTIN diff --git a/clang/include/clang/Basic/CMakeLists.txt b/clang/include/clang/Basic/CMakeLists.txt --- a/clang/include/clang/Basic/CMakeLists.txt +++ b/clang/include/clang/Basic/CMakeLists.txt @@ -84,3 +84,9 @@ clang_tablegen(arm_cde_builtin_aliases.inc -gen-arm-cde-builtin-aliases SOURCE arm_cde.td TARGET ClangARMCdeBuiltinAliases) +clang_tablegen(riscv_vector_builtins.inc -gen-riscv-vector-builtins + SOURCE riscv_vector.td + TARGET ClangRISCVVectorBuiltins) +clang_tablegen(riscv_vector_builtin_cg.inc -gen-riscv-vector-builtin-codegen + SOURCE riscv_vector.td + TARGET ClangRISCVVectorBuiltinCG) diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td new file mode 100644 --- /dev/null +++ b/clang/include/clang/Basic/riscv_vector.td @@ -0,0 +1,211 @@ +//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the builtins for RISC-V V-extension. See: +// +// https://github.com/riscv/rvv-intrinsic-doc +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Instruction definitions +//===----------------------------------------------------------------------===// +// Each record of the class RVVBuiltin defines a collection of builtins (i.e. +// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1", +// "vadd_vv_i32m2", etc). +// +// The elements of this collection are defined by an instantiation process the +// range of which is specified by the cross product of the LMUL attribute and +// every element in the attribute TypeRange. By default builtins have LMUL = [1, +// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we +// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL. +// +// LMUL represents the fact that the types of values used by that builtin are +// values generated by instructions that are executed under that LMUL. However, +// this does not mean the builtin is necessarily lowered into an instruction +// that executes under the specified LMUL. An example where this happens are +// loads and stores of masks. A mask like `vbool8_t` can be generated, for +// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two +// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will +// be performed under LMUL=1 because mask registers are not grouped. 
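As a concrete illustration of the instantiation described above (an editorial sketch, not part of the patch; the helper function name is hypothetical, while the vint8*_t type names are the ones exercised by the tests later in this diff): pairing element type int8_t ("c") with the seven Log2LMUL values yields one vector type, and therefore one builtin suffix, per LMUL.

#include <riscv_vector.h>  // declares the RVV sizeless vector types used below

// One C type per Log2LMUL value for element type int8_t ("c").
// The builtin suffix (i8mf8 ... i8m8) follows the naming rule described above.
void lmul_instantiation_sketch(void) {
  vint8mf8_t t_mf8;  // Log2LMUL = -3, suffix i8mf8
  vint8mf4_t t_mf4;  // Log2LMUL = -2, suffix i8mf4
  vint8mf2_t t_mf2;  // Log2LMUL = -1, suffix i8mf2
  vint8m1_t  t_m1;   // Log2LMUL =  0, suffix i8m1
  vint8m2_t  t_m2;   // Log2LMUL =  1, suffix i8m2
  vint8m4_t  t_m4;   // Log2LMUL =  2, suffix i8m4
  vint8m8_t  t_m8;   // Log2LMUL =  3, suffix i8m8
  (void)t_mf8; (void)t_mf4; (void)t_mf2; (void)t_m1;
  (void)t_m2; (void)t_m4; (void)t_m8;
}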
+// +// TypeRange is a non-empty sequence of basic types: +// +// c: int8_t (i8) +// s: int16_t (i16) +// i: int32_t (i32) +// l: int64_t (i64) +// h: float16_t (half) +// f: float32_t (float) +// d: float64_t (double) +// +// This way, given an LMUL, a record with a TypeRange "sil" will cause the +// definition of 3 builtins. Each type "t" in the TypeRange (in this example +// they are int16_t, int32_t, int64_t) is used as a parameter that drives the +// definition of that particular builtin (for the given LMUL). +// +// During the instantiation, types can be transformed or modified using type +// transformers. Given a type "t" the following primitive type transformers can +// be applied to it to yield another type. +// +// e: type of "t" as is (identity) +// v: computes a vector type whose element type is "t" for the current LMUL +// w: computes a vector type identical to what 'v' computes except for the +// element type which is twice as wide as the element type of 'v' +// q: computes a vector type identical to what 'v' computes except for the +// element type which is four times as wide as the element type of 'v' +// o: computes a vector type identical to what 'v' computes except for the +// element type which is eight times as wide as the element type of 'v' +// m: computes a vector type identical to what 'v' computes except for the +// element type which is bool +// 0: void type, ignores "t" +// z: size_t, ignores "t" +// t: ptrdiff_t, ignores "t" +// c: uint8_t, ignores "t" +// +// So for instance if t is "i", i.e. int, then "e" will yield int again. "v" +// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t. +// Accordingly "w" would yield __rvv_int64m2_t. +// +// A type transformer can be prefixed by other non-primitive type transformers. +// +// P: constructs a pointer to the current type +// C: adds const to the type +// K: requires the integer type to be a constant expression +// U: given an integer type or vector type, computes its unsigned variant +// I: given a vector type, compute the vector type with integer type +// elements of the same width +// F: given a vector type, compute the vector type with floating-point type +// elements of the same width +// W: widens an integer or float type. Cannot be used on vectors +// S: given a vector type, computes its equivalent one for LMUL=1. This is a +// no-op if the vector was already LMUL=1 +// +// Following with the example above, if t is "i", then "Ue" will yield unsigned +// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would +// yield __rvv_float64m2_t, etc. +// +// Each builtin is then defined by applying each type in TypeRange against the +// sequence of type transformers described in Suffix and Prototype. +// +// The name of the builtin is defined by the Name attribute (which defaults to +// the name of the class) appended (separated with an underscore) the Suffix +// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il", +// the builtin generated will be __builtin_rvv_foo_i32m1 and +// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one +// type transformer (say "vv") each of the types is separated with an +// underscore as in "__builtin_rvv_foo_i32m1_i32m1". +// +// The C/C++ prototype of the builtin is defined by the Prototype attribute. +// Prototype is a non-empty sequence of type transformers, the first of which +// is the return type of the builtin and the rest are the parameters of the +// builtin, in order. 
For instance if Prototype is "wvv" and TypeRange is "si"
+// a first builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
+//
+// There are a number of attributes that are used to constrain the number and
+// shape of the builtins generated. Refer to the comments below for them.
+class RVVBuiltin<string suffix, string prototype, string type_range> {
+  // Base name that will be prefixed with __builtin_rvv_ and suffixed with
+  // the computed Suffix.
+  string Name = NAME;
+
+  // If not empty, each instantiated builtin will have this appended after an
+  // underscore (_). It is instantiated like Prototype.
+  string Suffix = suffix;
+
+  // If empty, the default MangledName is the substring of `Name` that ends at
+  // the first '_'. For example, the default mangled name for Name `vadd_vv`
+  // is `vadd`. It is used to describe some special naming cases.
+  string MangledName = "";
+
+  // The different variants of the builtin, parameterised with a type.
+  string TypeRange = type_range;
+
+  // We use each type described in TypeRange and LMUL with prototype to
+  // instantiate a specific element of the set of builtins being defined.
+  // The Prototype attribute defines the C/C++ prototype of the builtin. It is
+  // a non-empty sequence of type transformers, the first of which is the
+  // return type of the builtin and the rest are the parameters of the
+  // builtin, in order. For instance if Prototype is "wvv", TypeRange is "si"
+  // and LMUL=1, a first builtin will have type
+  // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
+  // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
+  string Prototype = prototype;
+
+  // This builtin has a masked form.
+  bit HasMask = true;
+
+  // If HasMask, this flag states that this builtin has a maskedoff operand. It
+  // is always the first operand.
+  bit HasMaskedOffOperand = true;
+
+  // This builtin has a granted vector length parameter in the last position.
+  bit HasVL = true;
+
+  // This builtin supports function overloading and has a mangled name.
+  bit HasGeneric = true;
+
+  // Reads or writes "memory" or has other side-effects.
+  bit HasSideEffects = false;
+
+  // This builtin is valid for the given Log2LMULs.
+  list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
+
+  // Emit the automatic clang codegen. It describes what types we have to use
+  // to obtain the specific LLVM intrinsic. -1 means the return type;
+  // otherwise, k >= 0 means the k-th operand (counting from zero) of the
+  // unmasked builtin. k can't be the position of the mask operand.
+  list<int> IntrinsicTypes = [];
+
+  // If this name is not empty, it is the ID of the LLVM intrinsic
+  // we want to lower to.
+  string IRName = NAME;
+
+  // If HasMask, this is the ID of the LLVM intrinsic we want to lower to.
+  string IRNameMask = NAME #"_mask";
+}
+
+//===----------------------------------------------------------------------===//
+// Basic classes with automatic codegen.
+//===----------------------------------------------------------------------===//
+
+class RVVBinBuiltin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [-1, 1];
+}
+
+multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
+                            list<list<string>> suffixes_prototypes> {
+  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+    foreach s_p = suffixes_prototypes in {
+      let Name = NAME # "_" # s_p[0] in {
+        defvar suffix = s_p[1];
+        defvar prototype = s_p[2];
+        def : RVVBinBuiltin<suffix, prototype, type_range>;
+      }
+    }
+  }
+}
+
+// 12.
Vector Integer Arithmetic Instructions +// 12.1. Vector Single-Width Integer Add and Subtract +defm vadd : RVVBinBuiltinSet<"vadd", "csil", + [["vv", "v", "vvv"], + ["vx", "v", "vve"], + ["vv", "Uv", "UvUvUv"], + ["vx", "Uv", "UvUvUe"]]>; + +// 14. Vector Floating-Point Instructions +// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions +defm vfadd : RVVBinBuiltinSet<"vfadd", "fd", + [["vv", "v", "vvv"], + ["vf", "v", "vve"]]>; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -17661,196 +17661,7 @@ // Required for overloaded intrinsics. llvm::SmallVector IntrinsicTypes; switch (BuiltinID) { - // We could generate all the possible combinations and handling code in - // a file and include it here, instead of listing all the builtins plainly. - // Something like - // #include clang/Basic/RISCVVBuiltinCodeGen.inc - case RISCV::BIvadd_vv_i8m1_vl: - case RISCV::BIvadd_vv_i16m1_vl: - case RISCV::BIvadd_vv_i32m1_vl: - case RISCV::BIvadd_vv_i64m1_vl: - case RISCV::BIvadd_vv_i8m2_vl: - case RISCV::BIvadd_vv_i16m2_vl: - case RISCV::BIvadd_vv_i32m2_vl: - case RISCV::BIvadd_vv_i64m2_vl: - case RISCV::BIvadd_vv_i8m4_vl: - case RISCV::BIvadd_vv_i16m4_vl: - case RISCV::BIvadd_vv_i32m4_vl: - case RISCV::BIvadd_vv_i64m4_vl: - case RISCV::BIvadd_vv_i8m8_vl: - case RISCV::BIvadd_vv_i16m8_vl: - case RISCV::BIvadd_vv_i32m8_vl: - case RISCV::BIvadd_vv_i64m8_vl: - case RISCV::BIvadd_vv_i8mf2_vl: - case RISCV::BIvadd_vv_i16mf2_vl: - case RISCV::BIvadd_vv_i32mf2_vl: - case RISCV::BIvadd_vv_i8mf4_vl: - case RISCV::BIvadd_vv_i16mf4_vl: - case RISCV::BIvadd_vv_i8mf8_vl: - case RISCV::BIvadd_vx_i8m1_vl: - case RISCV::BIvadd_vx_i16m1_vl: - case RISCV::BIvadd_vx_i32m1_vl: - case RISCV::BIvadd_vx_i64m1_vl: - case RISCV::BIvadd_vx_i8m2_vl: - case RISCV::BIvadd_vx_i16m2_vl: - case RISCV::BIvadd_vx_i32m2_vl: - case RISCV::BIvadd_vx_i64m2_vl: - case RISCV::BIvadd_vx_i8m4_vl: - case RISCV::BIvadd_vx_i16m4_vl: - case RISCV::BIvadd_vx_i32m4_vl: - case RISCV::BIvadd_vx_i64m4_vl: - case RISCV::BIvadd_vx_i8m8_vl: - case RISCV::BIvadd_vx_i16m8_vl: - case RISCV::BIvadd_vx_i32m8_vl: - case RISCV::BIvadd_vx_i64m8_vl: - case RISCV::BIvadd_vx_i8mf2_vl: - case RISCV::BIvadd_vx_i16mf2_vl: - case RISCV::BIvadd_vx_i32mf2_vl: - case RISCV::BIvadd_vx_i8mf4_vl: - case RISCV::BIvadd_vx_i16mf4_vl: - case RISCV::BIvadd_vx_i8mf8_vl: - case RISCV::BIvadd_vv_u8m1_vl: - case RISCV::BIvadd_vv_u16m1_vl: - case RISCV::BIvadd_vv_u32m1_vl: - case RISCV::BIvadd_vv_u64m1_vl: - case RISCV::BIvadd_vv_u8m2_vl: - case RISCV::BIvadd_vv_u16m2_vl: - case RISCV::BIvadd_vv_u32m2_vl: - case RISCV::BIvadd_vv_u64m2_vl: - case RISCV::BIvadd_vv_u8m4_vl: - case RISCV::BIvadd_vv_u16m4_vl: - case RISCV::BIvadd_vv_u32m4_vl: - case RISCV::BIvadd_vv_u64m4_vl: - case RISCV::BIvadd_vv_u8m8_vl: - case RISCV::BIvadd_vv_u16m8_vl: - case RISCV::BIvadd_vv_u32m8_vl: - case RISCV::BIvadd_vv_u64m8_vl: - case RISCV::BIvadd_vv_u8mf2_vl: - case RISCV::BIvadd_vv_u16mf2_vl: - case RISCV::BIvadd_vv_u32mf2_vl: - case RISCV::BIvadd_vv_u8mf4_vl: - case RISCV::BIvadd_vv_u16mf4_vl: - case RISCV::BIvadd_vv_u8mf8_vl: - case RISCV::BIvadd_vx_u8m1_vl: - case RISCV::BIvadd_vx_u16m1_vl: - case RISCV::BIvadd_vx_u32m1_vl: - case RISCV::BIvadd_vx_u64m1_vl: - case RISCV::BIvadd_vx_u8m2_vl: - case RISCV::BIvadd_vx_u16m2_vl: - case RISCV::BIvadd_vx_u32m2_vl: - case RISCV::BIvadd_vx_u64m2_vl: - case RISCV::BIvadd_vx_u8m4_vl: - case RISCV::BIvadd_vx_u16m4_vl: - case 
RISCV::BIvadd_vx_u32m4_vl: - case RISCV::BIvadd_vx_u64m4_vl: - case RISCV::BIvadd_vx_u8m8_vl: - case RISCV::BIvadd_vx_u16m8_vl: - case RISCV::BIvadd_vx_u32m8_vl: - case RISCV::BIvadd_vx_u64m8_vl: - case RISCV::BIvadd_vx_u8mf2_vl: - case RISCV::BIvadd_vx_u16mf2_vl: - case RISCV::BIvadd_vx_u32mf2_vl: - case RISCV::BIvadd_vx_u8mf4_vl: - case RISCV::BIvadd_vx_u16mf4_vl: - case RISCV::BIvadd_vx_u8mf8_vl: - // The order of operands is (op1, op2, vl). - ID = Intrinsic::riscv_vadd; - IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()}; - break; - case RISCV::BIvadd_vv_i8m1_m_vl: - case RISCV::BIvadd_vv_i16m1_m_vl: - case RISCV::BIvadd_vv_i32m1_m_vl: - case RISCV::BIvadd_vv_i64m1_m_vl: - case RISCV::BIvadd_vv_i8m2_m_vl: - case RISCV::BIvadd_vv_i16m2_m_vl: - case RISCV::BIvadd_vv_i32m2_m_vl: - case RISCV::BIvadd_vv_i64m2_m_vl: - case RISCV::BIvadd_vv_i8m4_m_vl: - case RISCV::BIvadd_vv_i16m4_m_vl: - case RISCV::BIvadd_vv_i32m4_m_vl: - case RISCV::BIvadd_vv_i64m4_m_vl: - case RISCV::BIvadd_vv_i8m8_m_vl: - case RISCV::BIvadd_vv_i16m8_m_vl: - case RISCV::BIvadd_vv_i32m8_m_vl: - case RISCV::BIvadd_vv_i64m8_m_vl: - case RISCV::BIvadd_vv_i8mf2_m_vl: - case RISCV::BIvadd_vv_i16mf2_m_vl: - case RISCV::BIvadd_vv_i32mf2_m_vl: - case RISCV::BIvadd_vv_i8mf4_m_vl: - case RISCV::BIvadd_vv_i16mf4_m_vl: - case RISCV::BIvadd_vv_i8mf8_m_vl: - case RISCV::BIvadd_vx_i8m1_m_vl: - case RISCV::BIvadd_vx_i16m1_m_vl: - case RISCV::BIvadd_vx_i32m1_m_vl: - case RISCV::BIvadd_vx_i64m1_m_vl: - case RISCV::BIvadd_vx_i8m2_m_vl: - case RISCV::BIvadd_vx_i16m2_m_vl: - case RISCV::BIvadd_vx_i32m2_m_vl: - case RISCV::BIvadd_vx_i64m2_m_vl: - case RISCV::BIvadd_vx_i8m4_m_vl: - case RISCV::BIvadd_vx_i16m4_m_vl: - case RISCV::BIvadd_vx_i32m4_m_vl: - case RISCV::BIvadd_vx_i64m4_m_vl: - case RISCV::BIvadd_vx_i8m8_m_vl: - case RISCV::BIvadd_vx_i16m8_m_vl: - case RISCV::BIvadd_vx_i32m8_m_vl: - case RISCV::BIvadd_vx_i64m8_m_vl: - case RISCV::BIvadd_vx_i8mf2_m_vl: - case RISCV::BIvadd_vx_i16mf2_m_vl: - case RISCV::BIvadd_vx_i32mf2_m_vl: - case RISCV::BIvadd_vx_i8mf4_m_vl: - case RISCV::BIvadd_vx_i16mf4_m_vl: - case RISCV::BIvadd_vx_i8mf8_m_vl: - case RISCV::BIvadd_vv_u8m1_m_vl: - case RISCV::BIvadd_vv_u16m1_m_vl: - case RISCV::BIvadd_vv_u32m1_m_vl: - case RISCV::BIvadd_vv_u64m1_m_vl: - case RISCV::BIvadd_vv_u8m2_m_vl: - case RISCV::BIvadd_vv_u16m2_m_vl: - case RISCV::BIvadd_vv_u32m2_m_vl: - case RISCV::BIvadd_vv_u64m2_m_vl: - case RISCV::BIvadd_vv_u8m4_m_vl: - case RISCV::BIvadd_vv_u16m4_m_vl: - case RISCV::BIvadd_vv_u32m4_m_vl: - case RISCV::BIvadd_vv_u64m4_m_vl: - case RISCV::BIvadd_vv_u8m8_m_vl: - case RISCV::BIvadd_vv_u16m8_m_vl: - case RISCV::BIvadd_vv_u32m8_m_vl: - case RISCV::BIvadd_vv_u64m8_m_vl: - case RISCV::BIvadd_vv_u8mf2_m_vl: - case RISCV::BIvadd_vv_u16mf2_m_vl: - case RISCV::BIvadd_vv_u32mf2_m_vl: - case RISCV::BIvadd_vv_u8mf4_m_vl: - case RISCV::BIvadd_vv_u16mf4_m_vl: - case RISCV::BIvadd_vv_u8mf8_m_vl: - case RISCV::BIvadd_vx_u8m1_m_vl: - case RISCV::BIvadd_vx_u16m1_m_vl: - case RISCV::BIvadd_vx_u32m1_m_vl: - case RISCV::BIvadd_vx_u64m1_m_vl: - case RISCV::BIvadd_vx_u8m2_m_vl: - case RISCV::BIvadd_vx_u16m2_m_vl: - case RISCV::BIvadd_vx_u32m2_m_vl: - case RISCV::BIvadd_vx_u64m2_m_vl: - case RISCV::BIvadd_vx_u8m4_m_vl: - case RISCV::BIvadd_vx_u16m4_m_vl: - case RISCV::BIvadd_vx_u32m4_m_vl: - case RISCV::BIvadd_vx_u64m4_m_vl: - case RISCV::BIvadd_vx_u8m8_m_vl: - case RISCV::BIvadd_vx_u16m8_m_vl: - case RISCV::BIvadd_vx_u32m8_m_vl: - case RISCV::BIvadd_vx_u64m8_m_vl: - case RISCV::BIvadd_vx_u8mf2_m_vl: - case 
RISCV::BIvadd_vx_u16mf2_m_vl: - case RISCV::BIvadd_vx_u32mf2_m_vl: - case RISCV::BIvadd_vx_u8mf4_m_vl: - case RISCV::BIvadd_vx_u16mf4_m_vl: - case RISCV::BIvadd_vx_u8mf8_m_vl: - ID = Intrinsic::riscv_vadd_mask; - // The order of operands is (mask, maskedoff, op1, op2, vl). - IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[4]->getType()}; - // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl). - std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); - break; +#include "clang/Basic/riscv_vector_builtin_cg.inc" } assert(ID != Intrinsic::not_intrinsic); diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -209,6 +209,10 @@ clang_generate_header(-gen-arm-mve-header arm_mve.td arm_mve.h) # Generate arm_cde.h clang_generate_header(-gen-arm-cde-header arm_cde.td arm_cde.h) +# Generate riscv_vector.h +clang_generate_header(-gen-riscv-vector-header riscv_vector.td riscv_vector.h) +# Generate riscv_vector_generic.h +clang_generate_header(-gen-riscv-vector-generic-header riscv_vector.td riscv_vector_generic.h) add_custom_target(clang-resource-headers ALL DEPENDS ${out_files}) set_target_properties(clang-resource-headers PROPERTIES diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c @@ -0,0 +1,2476 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t +// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vadd_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, 
vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
@@ -0,0 +1,516 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// 
CHECK-RV32-LABEL: @test_vfadd_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, 
float32_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, 
vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -0,0 +1,2476 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vadd_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vadd_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vadd_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vadd_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vadd_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vadd_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vadd_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vadd_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vadd_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vadd_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vadd_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vadd_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, 
size_t vl) { + return vadd_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vadd_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vadd_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vadd_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m2(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vadd_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vadd_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vadd_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vadd_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vadd_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vadd_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vadd_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vadd_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vadd_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vadd_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vadd_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vadd_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vadd_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vadd_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vadd_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vadd_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vadd_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vadd_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vadd_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vadd_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vadd_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vadd_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, 
size_t vl) { + return vadd_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vadd_vv_u64m1(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vadd_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vadd_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vadd_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vadd_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vadd_vv_i64m2_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return 
vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t 
op2, size_t vl) { + return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, 
size_t vl) { + return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, 
vuint16mf4_t op2, size_t vl) { + return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( +// 
CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -0,0 +1,516 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vfadd_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
+  return vfadd_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return vfadd_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
+  return vfadd_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd_vv_f32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd_vv_f32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd_vv_f32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd_vv_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { + return vfadd_vf_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd_vv_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { + return vfadd_vf_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd_vv_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { + return vfadd_vf_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfadd_vv_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { + return vfadd_vf_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { + return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { + return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
+ return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+ return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
+ return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
+ return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/vadd.c b/clang/test/CodeGen/RISCV/vadd.c
deleted file mode 100644
--- a/clang/test/CodeGen/RISCV/vadd.c
+++ /dev/null
@@ -1,2648 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
-// RUN: -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV64-O2 %s
-// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v \
-// RUN: -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV32-O2 %s
-
-#include
-#include
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vv_i8m1_vl(__rvv_int8m1_t arg_0, __rvv_int8m1_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_i8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vv_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, __rvv_int8m1_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vv_i16m1_vl(__rvv_int16m1_t arg_0, __rvv_int16m1_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_i16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vv_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, __rvv_int16m1_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call
@llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vv_i32m1_vl(__rvv_int32m1_t arg_0, __rvv_int32m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vv_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, __rvv_int32m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vv_i64m1_vl(__rvv_int64m1_t arg_0, __rvv_int64m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vv_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, __rvv_int64m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vv_i8m2_vl(__rvv_int8m2_t arg_0, __rvv_int8m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m2_vl(arg_0, arg_1, arg_2); -} - 
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vv_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, __rvv_int8m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vv_i16m2_vl(__rvv_int16m2_t arg_0, __rvv_int16m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vv_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, __rvv_int16m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vv_i32m2_vl(__rvv_int32m2_t arg_0, __rvv_int32m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// 
CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vv_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, __rvv_int32m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vv_i64m2_vl(__rvv_int64m2_t arg_0, __rvv_int64m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vv_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, __rvv_int64m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vv_i8m4_vl(__rvv_int8m4_t arg_0, __rvv_int8m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vv_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, __rvv_int8m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: 
@test_vadd_vv_i16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vv_i16m4_vl(__rvv_int16m4_t arg_0, __rvv_int16m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vv_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, __rvv_int16m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vv_i32m4_vl(__rvv_int32m4_t arg_0, __rvv_int32m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vv_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, __rvv_int32m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vv_i64m4_vl(__rvv_int64m4_t arg_0, __rvv_int64m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vv_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, __rvv_int64m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vv_i8m8_vl(__rvv_int8m8_t arg_0, __rvv_int8m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vv_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, __rvv_int8m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vv_i16m8_vl(__rvv_int16m8_t arg_0, __rvv_int16m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vv_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, 
__rvv_int16m8_t arg_2, __rvv_int16m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vv_i32m8_vl(__rvv_int32m8_t arg_0, __rvv_int32m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vv_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, __rvv_int32m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vv_i64m8_vl(__rvv_int64m8_t arg_0, __rvv_int64m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vv_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, __rvv_int64m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vv_i8mf2_vl(__rvv_int8mf2_t arg_0, __rvv_int8mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vv_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, __rvv_int8mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vv_i16mf2_vl(__rvv_int16mf2_t arg_0, __rvv_int16mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vv_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, __rvv_int16mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vv_i32mf2_vl(__rvv_int32mf2_t arg_0, __rvv_int32mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], 
i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vv_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, __rvv_int32mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vv_i8mf4_vl(__rvv_int8mf4_t arg_0, __rvv_int8mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vv_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, __rvv_int8mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vv_i16mf4_vl(__rvv_int16mf4_t arg_0, __rvv_int16mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vv_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, __rvv_int16mf4_t arg_3, size_t arg_4) -{ - return 
__builtin_rvv_vadd_vv_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vv_i8mf8_vl(__rvv_int8mf8_t arg_0, __rvv_int8mf8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vv_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, __rvv_int8mf8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vx_i8m1_vl(__rvv_int8m1_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vx_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// 
-__rvv_int16m1_t test_vadd_vx_i16m1_vl(__rvv_int16m1_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vx_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vx_i32m1_vl(__rvv_int32m1_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vx_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vx_i64m1_vl(__rvv_int64m1_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail 
call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vx_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vx_i8m2_vl(__rvv_int8m2_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vx_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vx_i16m2_vl(__rvv_int16m2_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vx_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 
[[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vx_i32m2_vl(__rvv_int32m2_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vx_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vx_i64m2_vl(__rvv_int64m2_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vx_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vx_i8m4_vl(__rvv_int8m4_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] 
= tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vx_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vx_i16m4_vl(__rvv_int16m4_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vx_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vx_i32m4_vl(__rvv_int32m4_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vx_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, int32_t 
arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vx_i64m4_vl(__rvv_int64m4_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vx_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vx_i8m8_vl(__rvv_int8m8_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vx_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// 
CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vx_i16m8_vl(__rvv_int16m8_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vx_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vx_i32m8_vl(__rvv_int32m8_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vx_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vx_i64m8_vl(__rvv_int64m8_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: 
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vx_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vx_i8mf2_vl(__rvv_int8mf2_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vx_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vx_i16mf2_vl(__rvv_int16mf2_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vx_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vx_i32mf2_vl(__rvv_int32mf2_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vx_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vx_i8mf4_vl(__rvv_int8mf4_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vx_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vx_i16mf4_vl(__rvv_int16mf4_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: 
@test_vadd_vx_i16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vx_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vx_i8mf8_vl(__rvv_int8mf8_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vx_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vv_u8m1_vl(__rvv_uint8m1_t arg_0, __rvv_uint8m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t 
test_vadd_vv_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, __rvv_uint8m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vv_u16m1_vl(__rvv_uint16m1_t arg_0, __rvv_uint16m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vv_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, __rvv_uint16m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vv_u32m1_vl(__rvv_uint32m1_t arg_0, __rvv_uint32m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vv_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, __rvv_uint32m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_vl( -// 
CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vv_u64m1_vl(__rvv_uint64m1_t arg_0, __rvv_uint64m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vv_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, __rvv_uint64m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vv_u8m2_vl(__rvv_uint8m2_t arg_0, __rvv_uint8m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vv_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, __rvv_uint8m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vv_u16m2_vl(__rvv_uint16m2_t arg_0, __rvv_uint16m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( 
[[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vv_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, __rvv_uint16m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vv_u32m2_vl(__rvv_uint32m2_t arg_0, __rvv_uint32m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vv_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, __rvv_uint32m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vv_u64m2_vl(__rvv_uint64m2_t arg_0, __rvv_uint64m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vv_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, 
__rvv_uint64m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vv_u8m4_vl(__rvv_uint8m4_t arg_0, __rvv_uint8m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vv_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, __rvv_uint8m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vv_u16m4_vl(__rvv_uint16m4_t arg_0, __rvv_uint16m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vv_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, __rvv_uint16m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vv_u32m4_vl(__rvv_uint32m4_t arg_0, __rvv_uint32m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vv_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, __rvv_uint32m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vv_u64m4_vl(__rvv_uint64m4_t arg_0, __rvv_uint64m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vv_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, __rvv_uint64m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vv_u8m8_vl(__rvv_uint8m8_t arg_0, __rvv_uint8m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 
[[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vv_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, __rvv_uint8m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vv_u16m8_vl(__rvv_uint16m8_t arg_0, __rvv_uint16m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vv_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, __rvv_uint16m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vv_u32m8_vl(__rvv_uint32m8_t arg_0, __rvv_uint32m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vv_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, __rvv_uint32m8_t arg_3, size_t arg_4) -{ - return 
__builtin_rvv_vadd_vv_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vv_u64m8_vl(__rvv_uint64m8_t arg_0, __rvv_uint64m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vv_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, __rvv_uint64m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vv_u8mf2_vl(__rvv_uint8mf2_t arg_0, __rvv_uint8mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vv_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, __rvv_uint8mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 
[[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vv_u16mf2_vl(__rvv_uint16mf2_t arg_0, __rvv_uint16mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vv_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, __rvv_uint16mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vv_u32mf2_vl(__rvv_uint32mf2_t arg_0, __rvv_uint32mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vv_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, __rvv_uint32mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vv_u8mf4_vl(__rvv_uint8mf4_t arg_0, __rvv_uint8mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret 
[[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vv_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, __rvv_uint8mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vv_u16mf4_vl(__rvv_uint16mf4_t arg_0, __rvv_uint16mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vv_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, __rvv_uint16mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vv_u8mf8_vl(__rvv_uint8mf8_t arg_0, __rvv_uint8mf8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vv_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, __rvv_uint8mf8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, 
arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vx_u8m1_vl(__rvv_uint8m1_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vx_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vx_u16m1_vl(__rvv_uint16m1_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vx_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vx_u32m1_vl(__rvv_uint32m1_t arg_0, 
uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vx_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vx_u64m1_vl(__rvv_uint64m1_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vx_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vx_u8m2_vl(__rvv_uint8m2_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_1:%.*]], 
[[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vx_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vx_u16m2_vl(__rvv_uint16m2_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vx_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vx_u32m2_vl(__rvv_uint32m2_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vx_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// 
CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vx_u64m2_vl(__rvv_uint64m2_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vx_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vx_u8m4_vl(__rvv_uint8m4_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vx_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vx_u16m4_vl(__rvv_uint16m4_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vx_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vx_u32m4_vl(__rvv_uint32m4_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vx_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vx_u64m4_vl(__rvv_uint64m4_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vx_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t 
arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vx_u8m8_vl(__rvv_uint8m8_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vx_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vx_u16m8_vl(__rvv_uint16m8_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vx_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 
[[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vx_u32m8_vl(__rvv_uint32m8_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vx_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vx_u64m8_vl(__rvv_uint64m8_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vx_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vx_u8mf2_vl(__rvv_uint8mf2_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_m_vl( -// 
CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vx_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vx_u16mf2_vl(__rvv_uint16mf2_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vx_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vx_u32mf2_vl(__rvv_uint32mf2_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vx_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_vl( -// CHECK-RV64-O2-NEXT: 
entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vx_u8mf4_vl(__rvv_uint8mf4_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vx_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vx_u16mf4_vl(__rvv_uint16mf4_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vx_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vx_u8mf8_vl(__rvv_uint8mf8_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return 
__builtin_rvv_vadd_vx_u8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vx_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} diff --git a/clang/test/Headers/riscv-vector-header.c b/clang/test/Headers/riscv-vector-header.c new file mode 100644 --- /dev/null +++ b/clang/test/Headers/riscv-vector-header.c @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 -triple riscv64 -fsyntax-only \ +// RUN: -target-feature +m -target-feature +a -target-feature +f \ +// RUN: -target-feature +d -target-feature +experimental-v %s +// expected-no-diagnostics + +#include <riscv_vector.h> diff --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt --- a/clang/utils/TableGen/CMakeLists.txt +++ b/clang/utils/TableGen/CMakeLists.txt @@ -18,6 +18,7 @@ ClangTypeNodesEmitter.cpp MveEmitter.cpp NeonEmitter.cpp + RISCVVEmitter.cpp SveEmitter.cpp TableGen.cpp ) diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp new file mode 100644 --- /dev/null +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -0,0 +1,1043 @@ +//===- RISCVVEmitter.cpp - Generate riscv_vector.h for use with clang -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This tablegen backend is responsible for emitting riscv_vector.h and +// riscv_vector_generic.h, which include a declaration and a definition of each +// intrinsic function specified in https://github.com/riscv/rvv-intrinsic-doc. +// +// See also the documentation in include/clang/Basic/riscv_vector.td. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/ADT/Twine.h" +#include "llvm/TableGen/Record.h" + +using namespace llvm; +using BasicType = char; +using VScaleVal = Optional; + +namespace { + +// Exponential LMUL +class LMULType { +private: + int Log2LMUL; + +public: + LMULType(int Log2LMUL); + // Return the C/C++ string representation of LMUL + std::string str() const; + Optional getScale(unsigned ElementBitwidth) const; + LMULType &operator*=(unsigned RHS); +}; + +// This class is a compact representation of a valid or invalid RVVType. +class RVVType { + BasicType BT; + LMULType LMUL; + bool IsFloat, IsBool, IsSigned; + // IsImmediate indices are "int", but have the constant expression. + bool IsImmediate; + bool IsVoid; + // const qualifier.
+ bool IsConstant; + bool IsPointer; + bool IsSize_t, IsPtrdiff_t; + unsigned ElementBitwidth; + VScaleVal Scale; + bool Valid; + + std::string BuiltinStr; + std::string ClangBuiltinStr; + std::string Str; + std::string ShortStr; + +public: + RVVType() : RVVType(BasicType(), 0, StringRef()) {} + RVVType(BasicType BT, int Log2LMUL, StringRef prototype); + + // Return the string representation of a type, which is an encoded string for + // passing to the BUILTIN() macro in Builtins.def. + const std::string &builtin_str() const { return BuiltinStr; } + + // Return the clang builtin type for an RVV vector type, which is used in the + // riscv_vector.h header file. + const std::string &clang_builtin_str() const { return ClangBuiltinStr; } + + // Return the C/C++ string representation of a type for use in the + // riscv_vector.h header file. + const std::string &type_str() const { return Str; } + + // Return the short name of a type for C/C++ name suffix. + const std::string &short_str() const { return ShortStr; } + + bool isValid() const { return Valid; } + bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; } + bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; } + bool isFloatVector(unsigned Width) const { + return isVector() && IsFloat && ElementBitwidth == Width; + } + +private: + // Verify RVV vector type and set Valid. + bool verifyType() const; + + // Creates a type based on basic types of TypeRange + void applyBasicType(); + + // Applies a prototype modifier to the current type. The result may be an + // invalid type. + void applyModifier(StringRef prototype); + + // Compute and record a string for legal type. + void compute_builtin_str(); + // Compute and record a builtin RVV vector type string. + void compute_clang_builtin_str(); + // Compute and record a type string for use in the header. + void compute_type_str(); + // Compute and record a short name of a type for C/C++ name suffix. + void compute_short_str(); +}; + +using RVVTypePtr = RVVType *; +using RVVTypes = std::vector; + +enum RISCV_Extension : uint8_t { + Basic = 0, + F = 1 << 1, + D = 1 << 2, + ZFH = 1 << 3 +}; + +// TODO refactor RVVIntrinsic class design after supporting all intrinsic +// combinations.
This represents an instantiation of an intrinsic with a +// particular type and prototype. +class RVVIntrinsic { + +private: + std::string Name; // Builtin name + std::string MangledName; + std::string IRName; + bool HasSideEffects; + bool IsMask; + bool HasMaskedOffOperand; + bool HasVL; + bool HasGeneric; + RVVTypes Types; // Includes output and all input types + std::vector IntrinsicTypes; // Type name in LLVM IR intrinsic suffix + uint8_t RISCV_Extensions = 0; + +public: + RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, + StringRef IRName, bool HasSideEffects, bool IsMask, + bool HasMaskedOffOperand, bool HasVL, bool HasGeneric, + const RVVTypes &Types, + const std::vector &RVVIntrinsicTypes); + ~RVVIntrinsic() = default; + + StringRef getName() const { return Name; } + StringRef getMangledName() const { return MangledName; } + bool hasSideEffects() const { return HasSideEffects; } + bool hasMaskedOffOperand() const { return HasMaskedOffOperand; } + bool hasVL() const { return HasVL; } + bool hasGeneric() const { return HasGeneric; } + size_t getNumOperand() const { return Types.size() - 1; } + // Get output and input types + ArrayRef getTypes() const { return Types; } + ArrayRef getIntrinsicTypes() const { return IntrinsicTypes; } + std::string getIRName() const { return IRName; } + uint8_t getRISCV_Extensions() const { return RISCV_Extensions; } + + // Return the type string for a BUILTIN() macro in Builtins.def. + std::string getBuiltinTypeStr() const; + + // Emit the code block for the switch body in EmitRISCVBuiltinExpr; it should + // initialize the intrinsic ID and IntrinsicTypes. + void emitCodeGenSwitchBody(raw_ostream &o) const; + + // Emit the function declaration with the given function name. Arguments are + // named arg_1, arg_2, ... + void emitFuncDecl(raw_ostream &o, bool IsMangled) const; + + // Emit the mangled function definition. + void emitMangledFuncDef(raw_ostream &o) const; +}; + +using TypeString = std::string; +class RVVEmitter { +private: + RecordKeeper &Records; + // Concat BasicType, LMUL and Proto as key + StringMap LegalTypes; + StringSet<> IllegalTypes; + +public: + RVVEmitter(RecordKeeper &R) : Records(R) {} + + /// Emit riscv_vector.h + void createHeader(raw_ostream &o); + + /// Emit riscv_vector_generic.h + void createGenericHeader(raw_ostream &o); + + /// Emit all the __builtin prototypes and code needed by Sema. + void createBuiltins(raw_ostream &o); + + /// Emit all the information needed to map builtin -> LLVM IR intrinsic. + void createCodeGen(raw_ostream &o); + +private: + /// Create all intrinsics and add them to \p Out + void createRVVIntrinsics(std::vector> &Out); + /// Compute output and input types by applying different configs (basic type + /// and LMUL with type transformers). It also records the result types in the + /// legal or illegal set to avoid computing the same config again. The result + /// may contain an illegal RVVType. + Optional computeTypes(BasicType BT, int Log2LMUL, + ArrayRef PrototypeSeq); + Optional computeType(BasicType BT, int Log2LMUL, StringRef Proto); + + /// Emit arch guard macro definitions and body + void emitArchMacroAndBody( + std::vector> &Defs, raw_ostream &o, + std::function); + + // Return the architecture preprocessor definitions.
+ SmallVector getExtStrings(uint8_t Extensions); +}; + +} // namespace + +//===----------------------------------------------------------------------===// +// Type implementation +//===----------------------------------------------------------------------===// + +LMULType::LMULType(int NewLog2LMUL) { + // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3 + assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!"); + Log2LMUL = NewLog2LMUL; +} + +std::string LMULType::str() const { + if (Log2LMUL < 0) + return "mf" + utostr(1 << (-Log2LMUL)); + return "m" + utostr(1 << Log2LMUL); +} + +VScaleVal LMULType::getScale(unsigned ElementBitwidth) const { + int ExpResult = 0; + switch (ElementBitwidth) { + default: + break; + case 8: + ExpResult = Log2LMUL + 3; + break; + case 16: + ExpResult = Log2LMUL + 2; + break; + case 32: + ExpResult = Log2LMUL + 1; + break; + case 64: + ExpResult = Log2LMUL; + break; + } + // Illegal vscale result would be less than 1 + if (ExpResult < 0) + return None; + return Optional(1 << ExpResult); +} + +LMULType &LMULType::operator*=(unsigned RHS) { + this->Log2LMUL = this->Log2LMUL + RHS; + return *this; +} + +RVVType::RVVType(BasicType BT, int Log2LMUL, StringRef prototype) + : BT(BT), LMUL(LMULType(Log2LMUL)), IsFloat(false), IsBool(false), + IsSigned(true), IsImmediate(false), IsVoid(false), IsConstant(false), + IsPointer(false), IsSize_t(false), IsPtrdiff_t(false), + ElementBitwidth(~0U), Scale(0) { + applyBasicType(); + applyModifier(prototype); + Valid = verifyType(); + if (Valid) { + compute_builtin_str(); + compute_type_str(); + if (isVector()) { + compute_clang_builtin_str(); + compute_short_str(); + } + } +} + +// clang-format off +// boolean type are encoded the ratio of n (SEW/LMUL) +// SEW/LMUL | 1 | 2 | 4 | 8 | 16 | 32 | 64 +// c type | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t | vbool2_t | vbool1_t +// IR type | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1 + +// type\lmul | 1/8 | 1/4 | 1/2 | 1 | 2 | 4 | 8 +// -------- |------ | -------- | ------- | ------- | -------- | -------- | -------- +// i64 | N/A | N/A | N/A | nxv1i64 | nxv2i64 | nxv4i64 | nxv8i64 +// i32 | N/A | N/A | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32 | nxv16i32 +// i16 | N/A | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16 +// i8 | nxv1i8 | nxv2i8 | nxv4i8 | nxv8i8 | nxv16i8 | nxv32i8 | nxv64i8 +// double | N/A | N/A | N/A | nxv1f64 | nxv2f64 | nxv4f64 | nxv8f64 +// float | N/A | N/A | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32 | nxv16f32 +// half | N/A | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16 +// clang-format on + +bool RVVType::verifyType() const { + if (isScalar()) + return true; + if (!Scale.hasValue()) + return false; + if (IsFloat && ElementBitwidth == 8) + return false; + unsigned V = Scale.getValue(); + switch (ElementBitwidth) { + case 1: + case 8: + // Check Scale is 1,2,4,8,16,32,64 + return (V <= 64 && isPowerOf2_32(V)); + case 16: + // Check Scale is 1,2,4,8,16,32 + return (V <= 32 && isPowerOf2_32(V)); + case 32: + // Check Scale is 1,2,4,8,16 + return (V <= 16 && isPowerOf2_32(V)); + case 64: + // Check Scale is 1,2,4,8 + return (V <= 8 && isPowerOf2_32(V)); + } + return false; +} + +void RVVType::compute_builtin_str() { + assert(isValid() && "RVVType is invalid"); + std::string &S = BuiltinStr; + if (IsVoid) { + S = "v"; + return; + } else if (IsSize_t) { + S = "z"; + if (IsImmediate) + S = "I" + S; + return; + } else if (IsPtrdiff_t) { + S = "Y"; + return; + } + + if (!IsFloat) { + switch (ElementBitwidth) { + 
case 1: + S += "b"; + break; + case 8: + S += "c"; + break; + case 16: + S += "s"; + break; + case 32: + S += "i"; + break; + case 64: + S += "Wi"; + break; + default: + llvm_unreachable("Unhandled case!"); + } + if (!IsBool) { + if (IsSigned) + S = "S" + S; + else + S = "U" + S; + } + if (IsImmediate) + S = "I" + S; + } else { + switch (ElementBitwidth) { + case 16: + S += "h"; + break; + case 32: + S += "f"; + break; + case 64: + S += "d"; + break; + default: + llvm_unreachable("Unhandled case!"); + } + } + if (isScalar()) { + if (IsConstant) + S += "C"; + if (IsPointer) + S += "*"; + return; + } + S = "q" + utostr(Scale.getValue()) + S; +} + +void RVVType::compute_clang_builtin_str() { + assert(isValid() && "RVVType is invalid"); + assert(isVector() && "Handle Vector type only"); + + std::string &S = ClangBuiltinStr; + S += "__rvv_"; + if (IsBool) + S += "bool"; + else if (IsFloat) + S += "float"; + else if (IsSigned) + S += "int"; + else + S += "uint"; + if (IsBool) + S += utostr(64 / Scale.getValue()); + else + S += utostr(ElementBitwidth); + if (!IsBool) { + S += LMUL.str(); + } + S += "_t"; +} + +void RVVType::compute_type_str() { + assert(isValid() && "RVVType is invalid"); + std::string &S = Str; + if (IsVoid) { + S = "void"; + return; + } else if (IsSize_t) { + S = "size_t"; + return; + } else if (IsPtrdiff_t) { + S = "ptriff_t"; + return; + } + if (IsConstant) + S += "const "; + if (isVector()) + S += "v"; + if (IsBool) + S += "bool"; + else if (IsFloat) + S += "float"; + else if (IsSigned) + S += "int"; + else + S += "uint"; + // Vector bool is special case, the formulate is `vbool_t = + // MVT::nxv<64/N>i1` ex. vbool16_t = MVT:: + if (IsBool && isVector()) + S += utostr(64 / Scale.getValue()); + else + S += utostr(ElementBitwidth); + // Non bool RVV vector type has LMUL + if (isVector() && !IsBool) { + S += LMUL.str(); + } + S += "_t"; + if (IsPointer) + S += " *"; +} + +void RVVType::compute_short_str() { + assert(isVector() && "only handle vector type"); + if (IsBool) { + ShortStr = "b" + utostr(64 / Scale.getValue()); + return; + } + std::string &S = ShortStr; + if (IsFloat) + S = "f"; + else if (IsSigned) + S = "i"; + else + S = "u"; + S += utostr(ElementBitwidth) + LMUL.str(); +} + +void RVVType::applyBasicType() { + switch (BT) { + case 'c': + ElementBitwidth = 8; + break; + case 's': + ElementBitwidth = 16; + break; + case 'i': + ElementBitwidth = 32; + break; + case 'l': + ElementBitwidth = 64; + break; + case 'h': + ElementBitwidth = 16; + IsFloat = true; + break; + case 'f': + ElementBitwidth = 32; + IsFloat = true; + break; + case 'd': + ElementBitwidth = 64; + IsFloat = true; + break; + default: + llvm_unreachable("Unhandled type code!"); + } + assert(ElementBitwidth != ~0U && "Bad element bitwidth!"); +} + +void RVVType::applyModifier(StringRef Transformer) { + if (Transformer.empty()) + return; + // Handle primitive type transformer + switch (Transformer.back()) { + case 'e': + Scale = 0; + break; + case 'v': + Scale = LMUL.getScale(ElementBitwidth); + break; + case 'w': + ElementBitwidth *= 2; + LMUL *= 2; + Scale = LMUL.getScale(ElementBitwidth); + break; + case 'q': + ElementBitwidth *= 4; + LMUL *= 4; + Scale = LMUL.getScale(ElementBitwidth); + break; + case 'o': + ElementBitwidth *= 8; + LMUL *= 8; + Scale = LMUL.getScale(ElementBitwidth); + break; + case 'm': + IsBool = true; + IsFloat = false; + Scale = LMUL.getScale(ElementBitwidth); + ElementBitwidth = 1; + break; + case '0': + IsVoid = true; + break; + case 'z': + IsSize_t = true; + break; + case 't': + 
IsPtrdiff_t = true; + break; + case 'c': // uint8_t + IsSigned = false; + ElementBitwidth = 8; + Scale = 0; + break; + default: + llvm_unreachable("Illegal primitive type transformers!"); + } + + // Compute type transformers + for (char I : Transformer.take_front(Transformer.size() - 1)) { + switch (I) { + case 'P': + IsPointer = true; + break; + case 'C': + IsConstant = true; + break; + case 'K': + IsImmediate = true; + break; + case 'U': + IsSigned = false; + break; + case 'I': + IsFloat = false; + break; + case 'F': + IsFloat = true; + break; + case 'W': + assert(isVector() && "'W' type transformer cannot be used on vectors"); + ElementBitwidth *= 2; + break; + case 'S': + LMUL = LMULType(0); + break; + default: + llvm_unreachable("Illegal non-primitive type transformer!"); + } + } +} + +//===----------------------------------------------------------------------===// +// RVVIntrinsic implementation +//===----------------------------------------------------------------------===// +RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix, + StringRef NewMangledName, StringRef IRName, + bool HasSideEffects, bool IsMask, + bool HasMaskedOffOperand, bool HasVL, + bool HasGeneric, const RVVTypes &Types, + const std::vector &IntrinsicTypes) + : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask), + HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), + HasGeneric(HasGeneric), Types(Types), IntrinsicTypes(IntrinsicTypes) { + + // Init Name and MangledName + Name = NewName.str(); + if (NewMangledName.empty()) + MangledName = Twine(NewName.split("_").first).str(); + else + MangledName = NewMangledName.str(); + if (Suffix.size()) + Name += "_" + Suffix.str(); + if (IsMask) { + Name += "_m"; + MangledName += "_m"; + } + // Init RISCV_Extensions + for (const auto &T : Types) { + if (T->isFloatVector(16)) + RISCV_Extensions |= RISCV_Extension::ZFH; + else if (T->isFloatVector(32)) + RISCV_Extensions |= RISCV_Extension::F; + else if (T->isFloatVector(64)) + RISCV_Extensions |= RISCV_Extension::D; + } +} + +std::string RVVIntrinsic::getBuiltinTypeStr() const { + std::string S; + for (const auto &T : Types) { + S += T->builtin_str(); + } + return S; +} + +void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const { + + auto getIntrinsicTypesString = + [this](const std::vector &IntrinsicTypes, raw_ostream &OS) { + OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n"; + + OS << " IntrinsicTypes = {"; + for (const auto &Idx : IntrinsicTypes) { + if (Idx == -1) + OS << "ResultType"; + else + OS << ", Ops[" + utostr(static_cast(Idx)) + + "]->getType()"; + } + + // VL could be i64 or i32, need to encode it in IntrinsicTypes. VL is + // always last operand. + if (hasVL()) + OS << ", Ops[" + utostr(getNumOperand() - 1) + "]->getType()"; + OS << "};\n"; + }; + + if (!IsMask) { + getIntrinsicTypesString(getIntrinsicTypes(), OS); + return; + } + // IntrinsicTypes is the nonmasked version index, we need to update + // it. (It does not count the additional mask operand and maskedoff operand.) + signed Skew = 1; + if (hasMaskedOffOperand()) + Skew = 2; + std::vector NewIntrinsicTypes = getIntrinsicTypes(); + for (auto &I : NewIntrinsicTypes) { + if (I >= 0) + I += Skew; + } + getIntrinsicTypesString(NewIntrinsicTypes, OS); + + // The order of operands is (mask, maskedoff, op1, op..., vl). + // The order of intrinsic operands is (maskedoff, op1, op..., mask, vl) + // for masked operation with mask and maskedoff + // or + // The order of operands is (mask, op1, op..., vl). 
+ // The order of intrinsic operands is (op1, op..., mask, vl) + // for masked operation with mask only. + OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n"; +} + +void RVVIntrinsic::emitFuncDecl(raw_ostream &OS, bool IsMangled) const { + // Index 0 is output type + OS << Twine(Types[0]->type_str() + Twine(" ")).str(); + if (IsMangled) + OS << getMangledName(); + else + OS << getName(); + OS << "("; + + // Emit function arguments + if (Types.size() > 1) { + OS << Types[1]->type_str() + " arg_1"; + for (unsigned i = 2; i < Types.size(); ++i) + OS << ", " << Types[i]->type_str() << " arg_" << Twine(i); + } + OS << ")"; +} + +void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const { + emitFuncDecl(OS, /*IsMangled=*/true); + OS << "{\n"; + OS << " return " + getName() + "("; + // Emit parameter variables + if (Types.size() > 1) { + OS << "arg_1"; + for (unsigned i = 2; i < Types.size(); ++i) + OS << ", arg_" << Twine(i); + } + OS << ");\n"; + OS << "}\n\n"; +} + +//===----------------------------------------------------------------------===// +// RVVEmitter implementation +//===----------------------------------------------------------------------===// +void RVVEmitter::createHeader(raw_ostream &OS) { + + OS << "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics " + "-------------------===\n" + " *\n" + " *\n" + " * Part of the LLVM Project, under the Apache License v2.0 with LLVM " + "Exceptions.\n" + " * See https://llvm.org/LICENSE.txt for license information.\n" + " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" + " *\n" + " *===-----------------------------------------------------------------" + "------===\n" + " */\n\n"; + + OS << "#ifndef _RISCV_VECTOR_H\n"; + OS << "#define _RISCV_VECTOR_H\n\n"; + + OS << "#include \n"; + OS << "#include \n\n"; + + OS << "#ifndef __riscv_vector\n"; + OS << "#error \"Vector intrinsics require the vector extension.\"\n"; + OS << "#else\n\n"; + + // TODO: Upadte psABI to define float16_t type. + // OS << "typedef __fp16 float16_t;\n"; + OS << "typedef float float32_t;\n"; + OS << "typedef double float64_t;\n\n"; + + OS << "#ifdef __cplusplus\n"; + OS << "extern \"C\" {\n"; + OS << "#endif\n\n"; + + std::vector> Defs; + createRVVIntrinsics(Defs); + + // Dump RVV boolean types. + auto dumpType = [&](auto T) { + OS << "typedef " << T->clang_builtin_str() << " " << T->type_str() << ";\n"; + }; + + constexpr int Log2LMULs[] = {-3, -2, -1, 0, 1, 2, 3}; + for (int Log2LMUL : Log2LMULs) { + auto T = computeType('c', Log2LMUL, "m"); + if (T.hasValue()) + dumpType(T.getValue()); + } + // Dump RVV int/float types. + for (char I : StringRef("csil")) { + for (int Log2LMUL : Log2LMULs) { + auto T = computeType(I, Log2LMUL, "v"); + if (T.hasValue()) { + dumpType(T.getValue()); + auto UT = computeType(I, Log2LMUL, "Uv"); + dumpType(UT.getValue()); + } + } + } + // Dump RVV float types. + OS << "#if defined(__riscv_zfh)\n"; + for (int Log2LMUL : Log2LMULs) { + auto T = computeType('h', Log2LMUL, "v"); + // first. 
+ if (T.hasValue()) + dumpType(T.getValue()); + } + OS << "#endif\n"; + + // D implies F + OS << "#if defined(__riscv_f) || defined(__riscv_d)\n"; + for (int Log2LMUL : Log2LMULs) { + auto T = computeType('f', Log2LMUL, "v"); + if (T.hasValue()) + dumpType(T.getValue()); + } + OS << "#endif\n"; + + OS << "#if defined(__riscv_d)\n"; + for (int ELMul : Log2LMULs) { + auto T = computeType('d', ELMul, "v"); + if (T.hasValue()) + dumpType(T.getValue()); + } + OS << "#endif\n\n"; + + // Dump intrinsic functions with macro + emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { + OS << "// "; + Inst.emitFuncDecl(OS, /*IsMangled=*/false); + OS << "\n#define " << Inst.getName() << "(...) __builtin_rvv_" + << Inst.getName() << "(__VA_ARGS__)\n"; + }); + + OS << "\n#ifdef __cplusplus\n"; + OS << "}\n"; + OS << "#endif\n"; + OS << "#endif // __riscv_vector\n"; + OS << "#endif // _RISCV_VECTOR_H\n"; +} + +void RVVEmitter::createGenericHeader(raw_ostream &OS) { + std::vector> Defs; + createRVVIntrinsics(Defs); + + OS << "#include \n\n"; + // Dump intrinsic functions macro + emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { + if (!Inst.hasGeneric()) + return; + OS << StringRef( + "static inline __attribute__((__always_inline__, __nodebug__, " + "__overloadable__))\n"); + Inst.emitMangledFuncDef(OS); + }); +} + +void RVVEmitter::createBuiltins(raw_ostream &OS) { + std::vector> Defs; + createRVVIntrinsics(Defs); + + OS << "#if defined(BUILTIN) && !defined(RISCVV_BUILTIN)\n"; + OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)\n"; + OS << "#endif\n"; + for (auto &Def : Defs) { + OS << "RISCVV_BUILTIN(" << Def->getName() << ",\"" + << Def->getBuiltinTypeStr() << "\", "; + if (!Def->hasSideEffects()) + OS << "\"n\")\n"; + else + OS << "\"\")\n"; + } + OS << "\n#undef BUILTIN\n"; + OS << "#undef RISCVV_BUILTIN\n"; +} + +void RVVEmitter::createCodeGen(raw_ostream &OS) { + std::vector> Defs; + createRVVIntrinsics(Defs); + + // The same intrinsic name has the same switch body. + llvm::StringMap, 128>> DefsSet; + for (auto &def : Defs) { + DefsSet[def->getIRName()].push_back(std::move(def)); + } + for (const auto &KV : DefsSet) { + for (const auto &I : KV.getValue()) { + OS << "case RISCV::BI" << I->getName() << ":\n"; + } + KV.getValue()[0]->emitCodeGenSwitchBody(OS); + OS << "\n break;\n"; + } +} + +void RVVEmitter::createRVVIntrinsics( + std::vector> &Out) { + + std::vector RV = Records.getAllDerivedDefinitions("RVVBuiltin"); + for (auto *R : RV) { + StringRef Name = R->getValueAsString("Name"); + StringRef Suffix = R->getValueAsString("Suffix"); + StringRef MangledName = R->getValueAsString("MangledName"); + StringRef Prototypes = R->getValueAsString("Prototype"); + StringRef TypeRange = R->getValueAsString("TypeRange"); + bool HasMask = R->getValueAsBit("HasMask"); + bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand"); + bool HasVL = R->getValueAsBit("HasVL"); + bool HasGeneric = R->getValueAsBit("HasGeneric"); + bool HasSideEffects = R->getValueAsBit("HasSideEffects"); + std::vector Log2LMULList = R->getValueAsListOfInts("Log2LMUL"); + std::vector IntrinsicTypes = + R->getValueAsListOfInts("IntrinsicTypes"); + StringRef IRName = R->getValueAsString("IRName"); + StringRef IRNameMask = R->getValueAsString("IRNameMask"); + + // Parse prototype and create a list of primitive type with transformers + // (operand) in ProtoSeq. ProtoSeq[0] is output operand. 
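+ // For example (illustration): a Prototype of "vvv" is split into ProtoSeq = + // {"v", "v", "v"}, result type first. With HasVL a trailing "z" (the vl + // operand) is appended, and with HasMask an "m" mask operand is inserted + // right after the result type, giving {"v", "m", "v", "v", "z"}.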
+ SmallVector ProtoSeq; + const StringRef Primaries("evwqom0ztc"); + size_t start = 0; + for (size_t i = 0; i < Prototypes.size(); ++i) { + if (Primaries.find(Prototypes[i]) != StringRef::npos) { + ProtoSeq.push_back(Prototypes.substr(start, i - start + 1).str()); + start = i + 1; + } + } + // If HasVL, append 'z' to last operand + if (HasVL) + ProtoSeq.push_back("z"); + + SmallVector ProtoMaskSeq = ProtoSeq; + if (HasMask) { + // If HasMask, insert 'm' as first input operand. + ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m"); + // If HasMaskedOffOperand, insert result type as second input operand. + if (HasMaskedOffOperand) + ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 2, ProtoSeq[0]); + } + + // Create intrinsics for each type and LMUL. + for (char I : TypeRange) { + for (int Log2LMUL : Log2LMULList) { + Optional Types = computeTypes(I, Log2LMUL, ProtoSeq); + // Skip creating a new intrinsic if there are any illegal types. + if (!Types.hasValue()) + continue; + + auto SuffixStr = + computeType(I, Log2LMUL, Suffix).getValue()->short_str(); + // Create a non-mask intrinsic. + Out.push_back(std::make_unique( + Name, SuffixStr, MangledName, IRName, HasSideEffects, + /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasGeneric, + Types.getValue(), IntrinsicTypes)); + if (HasMask) { + // Create a mask intrinsic + Optional MaskTypes = + computeTypes(I, Log2LMUL, ProtoMaskSeq); + Out.push_back(std::make_unique( + Name, SuffixStr, MangledName, IRNameMask, HasSideEffects, + /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasGeneric, + MaskTypes.getValue(), IntrinsicTypes)); + } + } // end for Log2LMUL + } // end for TypeRange + } +} + +Optional +RVVEmitter::computeTypes(BasicType BT, int Log2LMUL, + ArrayRef PrototypeSeq) { + RVVTypes Types; + for (const std::string &Proto : PrototypeSeq) { + auto T = computeType(BT, Log2LMUL, Proto); + if (!T.hasValue()) + return llvm::None; + // Record legal type index + Types.push_back(T.getValue()); + } + return Optional(Types); +} + +Optional RVVEmitter::computeType(BasicType BT, int Log2LMUL, + StringRef Proto) { + TypeString Idx = Twine(Twine(BT) + Twine(Log2LMUL) + Proto).str(); + // Search first + auto It = LegalTypes.find(Idx); + if (It != LegalTypes.end()) + return Optional(&(It->second)); + if (IllegalTypes.count(Idx)) + return llvm::None; + // Compute type and record the result. + RVVType T(BT, Log2LMUL, Proto); + if (T.isValid()) { + // Record legal type index and value. + LegalTypes.insert({Idx, T}); + return Optional(&(LegalTypes[Idx])); + } + // Record illegal type index. + IllegalTypes.insert(Idx); + return llvm::None; +} + +void RVVEmitter::emitArchMacroAndBody( + std::vector> &Defs, raw_ostream &OS, + std::function DumpBody) { + + // Collect intrinsics with the same extension requirements into one set for the arch guard macro.
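+ // For instance (illustration): intrinsics that require the F extension are + // grouped under "#if defined(__riscv_f) || defined(__riscv_d)", and ZFH ones + // under "#if defined(__riscv_zfh)"; see getExtStrings below.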
+ DenseMap, 256>> DefsSet; + for (auto &def : Defs) { + DefsSet[def->getRISCV_Extensions()].push_back(std::move(def)); + } + + for (const auto &KV : DefsSet) { + SmallVector ExtStrings = getExtStrings(KV.getFirst()); + // Dump arch predecessor definitions + if (!ExtStrings.empty()) { + OS << "#if defined (" + ExtStrings[0] + ")"; + for (unsigned i = 1; i < ExtStrings.size(); ++i) + OS << " || defined(" << ExtStrings[i] << ")"; + OS << "\n"; + } + for (auto &Def : KV.getSecond()) { + DumpBody(OS, *Def); + } + if (!ExtStrings.empty()) + OS << "#endif\n\n"; + } +} + +SmallVector RVVEmitter::getExtStrings(uint8_t Extents) { + if (Extents == 0) + return {}; + SmallVector ExtVector; + // D implies F + if (Extents & RISCV_Extension::F) { + ExtVector.emplace_back("__riscv_f"); + ExtVector.emplace_back("__riscv_d"); + } + if (Extents & RISCV_Extension::D) { + ExtVector.emplace_back("__riscv_d"); + } + if (Extents & RISCV_Extension::ZFH) { + ExtVector.emplace_back("__riscv_zfh"); + } + return ExtVector; +} + +namespace clang { +void EmitRVVHeader(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createHeader(OS); +} + +void EmitRVVGenericHeader(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createGenericHeader(OS); +} + +void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createBuiltins(OS); +} + +void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createCodeGen(OS); +} + +} // End namespace clang diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp --- a/clang/utils/TableGen/TableGen.cpp +++ b/clang/utils/TableGen/TableGen.cpp @@ -83,6 +83,10 @@ GenArmCdeBuiltinSema, GenArmCdeBuiltinCG, GenArmCdeBuiltinAliases, + GenRISCVVectorHeader, + GenRISCVVectorGenericHeader, + GenRISCVVectorBuiltins, + GenRISCVVectorBuiltinCG, GenAttrDocs, GenDiagDocs, GenOptDocs, @@ -228,6 +232,15 @@ "Generate ARM CDE builtin code-generator for clang"), clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases", "Generate list of valid ARM CDE builtin aliases for clang"), + clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header", + "Generate riscv_vector.h for clang"), + clEnumValN(GenRISCVVectorGenericHeader, + "gen-riscv-vector-generic-header", + "Generate riscv_vector_generic.h for clang"), + clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins", + "Generate riscv_vector_builtins.inc for clang"), + clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen", + "Generate riscv_vector_builtin_cg.inc for clang"), clEnumValN(GenAttrDocs, "gen-attr-docs", "Generate attribute documentation"), clEnumValN(GenDiagDocs, "gen-diag-docs", @@ -428,6 +441,18 @@ case GenArmCdeBuiltinAliases: EmitCdeBuiltinAliases(Records, OS); break; + case GenRISCVVectorHeader: + EmitRVVHeader(Records, OS); + break; + case GenRISCVVectorGenericHeader: + EmitRVVGenericHeader(Records, OS); + break; + case GenRISCVVectorBuiltins: + EmitRVVBuiltins(Records, OS); + break; + case GenRISCVVectorBuiltinCG: + EmitRVVBuiltinCG(Records, OS); + break; case GenAttrDocs: EmitClangAttrDocs(Records, OS); break; diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h --- a/clang/utils/TableGen/TableGenBackends.h +++ b/clang/utils/TableGen/TableGenBackends.h @@ -106,6 +106,11 @@ void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); +void EmitRVVHeader(llvm::RecordKeeper 
&Records, llvm::raw_ostream &OS); +void EmitRVVGenericHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); +void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); +void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); + void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); diff --git a/llvm/docs/CommandGuide/tblgen.rst b/llvm/docs/CommandGuide/tblgen.rst --- a/llvm/docs/CommandGuide/tblgen.rst +++ b/llvm/docs/CommandGuide/tblgen.rst @@ -541,6 +541,22 @@ Generate list of valid ARM CDE builtin aliases for Clang. +.. option:: -gen-riscv-vector-header + + Generate ``riscv_vector.h`` for Clang. + +.. option:: -gen-riscv-vector-generic-header + + Generate ``riscv_vector_generic.h`` for Clang. + +.. option:: -gen-riscv-vector-builtins + + Generate ``riscv_vector_builtins.inc`` for Clang. + +.. option:: -gen-riscv-vector-builtin-codegen + + Generate ``riscv_vector_builtin_cg.inc`` for Clang. + .. option:: -gen-attr-docs Generate attribute documentation.
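For illustration, the header entries produced by createHeader above take roughly the following shape (a sketch, not verbatim tool output), reusing the vadd_vx_u32m1_vl builtin exercised by the tests removed earlier in this patch:

  typedef __rvv_uint32m1_t vuint32m1_t;
  // vuint32m1_t vadd_vx_u32m1_vl(vuint32m1_t arg_1, uint32_t arg_2, size_t arg_3)
  #define vadd_vx_u32m1_vl(...) __builtin_rvv_vadd_vx_u32m1_vl(__VA_ARGS__)

riscv_vector_generic.h then wraps each such builtin in an overloadable static inline function under its mangled, suffix-free name (roughly vadd(...) here).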