diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def --- a/clang/include/clang/Basic/BuiltinsRISCV.def +++ b/clang/include/clang/Basic/BuiltinsRISCV.def @@ -1,183 +1,3 @@ -#if defined(BUILTIN) && !defined(RISCVV_BUILTIN) -#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS) -#endif -RISCVV_BUILTIN(vadd_vv_i8m1_vl, "q8Scq8Scq8Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m1_m_vl, "q8Scq8bq8Scq8Scq8Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m1_vl, "q4Ssq4Ssq4Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m1_m_vl, "q4Ssq4bq4Ssq4Ssq4Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m1_m_vl, "q2Siq2bq2Siq2Siq2Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m1_vl, "q1SWiq1SWiq1SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiq1SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m2_vl, "q16Scq16Scq16Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m2_m_vl, "q16Scq16bq16Scq16Scq16Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m2_vl, "q8Ssq8Ssq8Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m2_m_vl, "q8Ssq8bq8Ssq8Ssq8Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m2_vl, "q4Siq4Siq4Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m2_m_vl, "q4Siq4bq4Siq4Siq4Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m2_vl, "q2SWiq2SWiq2SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiq2SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m4_vl, "q32Scq32Scq32Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m4_m_vl, "q32Scq32bq32Scq32Scq32Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m4_vl, "q16Ssq16Ssq16Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m4_m_vl, "q16Ssq16bq16Ssq16Ssq16Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m4_vl, "q8Siq8Siq8Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m4_m_vl, "q8Siq8bq8Siq8Siq8Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m4_vl, "q4SWiq4SWiq4SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiq4SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m8_vl, "q64Scq64Scq64Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m8_m_vl, "q64Scq64bq64Scq64Scq64Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m8_vl, "q32Ssq32Ssq32Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m8_m_vl, "q32Ssq32bq32Ssq32Ssq32Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m8_vl, "q16Siq16Siq16Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m8_m_vl, "q16Siq16bq16Siq16Siq16Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m8_vl, "q8SWiq8SWiq8SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiq8SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf2_vl, "q4Scq4Scq4Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf2_m_vl, "q4Scq4bq4Scq4Scq4Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf2_vl, "q2Ssq2Ssq2Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf2_m_vl, "q2Ssq2bq2Ssq2Ssq2Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32mf2_vl, "q1Siq1Siq1Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32mf2_m_vl, "q1Siq1bq1Siq1Siq1Siz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf4_vl, "q2Scq2Scq2Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf4_m_vl, "q2Scq2bq2Scq2Scq2Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf4_vl, "q1Ssq1Ssq1Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf4_m_vl, "q1Ssq1bq1Ssq1Ssq1Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf8_vl, "q1Scq1Scq1Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf8_m_vl, "q1Scq1bq1Scq1Scq1Scz", "n") -RISCVV_BUILTIN(vadd_vx_i8m1_vl, "q8Scq8ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m1_m_vl, "q8Scq8bq8Scq8ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m1_vl, "q4Ssq4SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m1_m_vl, "q4Ssq4bq4Ssq4SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m1_vl, "q2Siq2SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m1_m_vl, "q2Siq2bq2Siq2SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m1_vl, "q1SWiq1SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiSWiz", "n") 
-RISCVV_BUILTIN(vadd_vx_i8m2_vl, "q16Scq16ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m2_m_vl, "q16Scq16bq16Scq16ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m2_vl, "q8Ssq8SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m2_m_vl, "q8Ssq8bq8Ssq8SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m2_vl, "q4Siq4SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m2_m_vl, "q4Siq4bq4Siq4SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m2_vl, "q2SWiq2SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8m4_vl, "q32Scq32ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m4_m_vl, "q32Scq32bq32Scq32ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m4_vl, "q16Ssq16SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m4_m_vl, "q16Ssq16bq16Ssq16SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m4_vl, "q8Siq8SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m4_m_vl, "q8Siq8bq8Siq8SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m4_vl, "q4SWiq4SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8m8_vl, "q64Scq64ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m8_m_vl, "q64Scq64bq64Scq64ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m8_vl, "q32Ssq32SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m8_m_vl, "q32Ssq32bq32Ssq32SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m8_vl, "q16Siq16SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m8_m_vl, "q16Siq16bq16Siq16SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m8_vl, "q8SWiq8SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf2_vl, "q4Scq4ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf2_m_vl, "q4Scq4bq4Scq4ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf2_vl, "q2Ssq2SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf2_m_vl, "q2Ssq2bq2Ssq2SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32mf2_vl, "q1Siq1SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32mf2_m_vl, "q1Siq1bq1Siq1SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf4_vl, "q2Scq2ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf4_m_vl, "q2Scq2bq2Scq2ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf4_vl, "q1Ssq1SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf4_m_vl, "q1Ssq1bq1Ssq1SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf8_vl, "q1Scq1ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf8_m_vl, "q1Scq1bq1Scq1ScScz", "n") -RISCVV_BUILTIN(vadd_vv_u8m1_vl, "q8Ucq8Ucq8Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m1_m_vl, "q8Ucq8bq8Ucq8Ucq8Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m1_vl, "q4Usq4Usq4Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m1_m_vl, "q4Usq4bq4Usq4Usq4Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m1_vl, "q2Uiq2Uiq2Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m1_m_vl, "q2Uiq2bq2Uiq2Uiq2Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m1_vl, "q1UWiq1UWiq1UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiq1UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m2_vl, "q16Ucq16Ucq16Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m2_m_vl, "q16Ucq16bq16Ucq16Ucq16Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m2_vl, "q8Usq8Usq8Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m2_m_vl, "q8Usq8bq8Usq8Usq8Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m2_vl, "q4Uiq4Uiq4Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m2_m_vl, "q4Uiq4bq4Uiq4Uiq4Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m2_vl, "q2UWiq2UWiq2UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiq2UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m4_vl, "q32Ucq32Ucq32Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m4_m_vl, "q32Ucq32bq32Ucq32Ucq32Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m4_vl, "q16Usq16Usq16Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m4_m_vl, "q16Usq16bq16Usq16Usq16Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m4_vl, "q8Uiq8Uiq8Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m4_m_vl, "q8Uiq8bq8Uiq8Uiq8Uiz", "n") 
-RISCVV_BUILTIN(vadd_vv_u64m4_vl, "q4UWiq4UWiq4UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiq4UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m8_vl, "q64Ucq64Ucq64Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m8_m_vl, "q64Ucq64bq64Ucq64Ucq64Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m8_vl, "q32Usq32Usq32Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m8_m_vl, "q32Usq32bq32Usq32Usq32Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m8_vl, "q16Uiq16Uiq16Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m8_m_vl, "q16Uiq16bq16Uiq16Uiq16Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m8_vl, "q8UWiq8UWiq8UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiq8UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf2_vl, "q4Ucq4Ucq4Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf2_m_vl, "q4Ucq4bq4Ucq4Ucq4Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf2_vl, "q2Usq2Usq2Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf2_m_vl, "q2Usq2bq2Usq2Usq2Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32mf2_vl, "q1Uiq1Uiq1Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32mf2_m_vl, "q1Uiq1bq1Uiq1Uiq1Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf4_vl, "q2Ucq2Ucq2Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf4_m_vl, "q2Ucq2bq2Ucq2Ucq2Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf4_vl, "q1Usq1Usq1Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf4_m_vl, "q1Usq1bq1Usq1Usq1Usz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf8_vl, "q1Ucq1Ucq1Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf8_m_vl, "q1Ucq1bq1Ucq1Ucq1Ucz", "n") -RISCVV_BUILTIN(vadd_vx_u8m1_vl, "q8Ucq8UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m1_m_vl, "q8Ucq8bq8Ucq8UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m1_vl, "q4Usq4UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m1_m_vl, "q4Usq4bq4Usq4UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m1_vl, "q2Uiq2UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m1_m_vl, "q2Uiq2bq2Uiq2UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m1_vl, "q1UWiq1UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m2_vl, "q16Ucq16UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m2_m_vl, "q16Ucq16bq16Ucq16UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m2_vl, "q8Usq8UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m2_m_vl, "q8Usq8bq8Usq8UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m2_vl, "q4Uiq4UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m2_m_vl, "q4Uiq4bq4Uiq4UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m2_vl, "q2UWiq2UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m4_vl, "q32Ucq32UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m4_m_vl, "q32Ucq32bq32Ucq32UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m4_vl, "q16Usq16UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m4_m_vl, "q16Usq16bq16Usq16UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m4_vl, "q8Uiq8UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m4_m_vl, "q8Uiq8bq8Uiq8UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m4_vl, "q4UWiq4UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m8_vl, "q64Ucq64UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m8_m_vl, "q64Ucq64bq64Ucq64UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m8_vl, "q32Usq32UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m8_m_vl, "q32Usq32bq32Usq32UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m8_vl, "q16Uiq16UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m8_m_vl, "q16Uiq16bq16Uiq16UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m8_vl, "q8UWiq8UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf2_vl, "q4Ucq4UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf2_m_vl, "q4Ucq4bq4Ucq4UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf2_vl, "q2Usq2UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf2_m_vl, "q2Usq2bq2Usq2UsUsz", "n") 
-RISCVV_BUILTIN(vadd_vx_u32mf2_vl, "q1Uiq1UiUiz", "n")
-RISCVV_BUILTIN(vadd_vx_u32mf2_m_vl, "q1Uiq1bq1Uiq1UiUiz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf4_vl, "q2Ucq2UcUcz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf4_m_vl, "q2Ucq2bq2Ucq2UcUcz", "n")
-RISCVV_BUILTIN(vadd_vx_u16mf4_vl, "q1Usq1UsUsz", "n")
-RISCVV_BUILTIN(vadd_vx_u16mf4_m_vl, "q1Usq1bq1Usq1UsUsz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf8_vl, "q1Ucq1UcUcz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf8_m_vl, "q1Ucq1bq1Ucq1UcUcz", "n")
+#include "clang/Basic/riscv_vector_builtins.inc"
-#undef BUILTIN
-#undef RISCVV_BUILTIN
diff --git a/clang/include/clang/Basic/CMakeLists.txt b/clang/include/clang/Basic/CMakeLists.txt
--- a/clang/include/clang/Basic/CMakeLists.txt
+++ b/clang/include/clang/Basic/CMakeLists.txt
@@ -84,3 +84,9 @@
 clang_tablegen(arm_cde_builtin_aliases.inc -gen-arm-cde-builtin-aliases
   SOURCE arm_cde.td
   TARGET ClangARMCdeBuiltinAliases)
+clang_tablegen(riscv_vector_builtins.inc -gen-riscv-vector-builtins -D=ALL
+  SOURCE riscv_vector.td
+  TARGET ClangRISCVVectorBuiltins)
+clang_tablegen(riscv_vector_builtin_cg.inc -gen-riscv-vector-builtin-codegen -D=ALL
+  SOURCE riscv_vector.td
+  TARGET ClangRISCVVectorBuiltinCG)
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
new file mode 100644
--- /dev/null
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -0,0 +1,224 @@
+//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for the RISC-V V-extension. See:
+//
+//   https://github.com/riscv/rvv-intrinsic-doc
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Each record of the class RVVBuiltin defines a collection of builtins (e.g.
+// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
+// "vadd_vv_i32m2", etc).
+//
+// The elements of this collection are defined by an instantiation process
+// whose range is specified by the cross product of the LMUL attribute and
+// every element in the TypeRange attribute. By default builtins have
+// LMUL = [1, 2, 4, 8, 1/2, 1/4, 1/8], so the process is repeated 7 times. In
+// TableGen we use the exponential LMUL (ELMUL) [0, 1, 2, 3, -1, -2, -3] to
+// represent the LMUL.
+//
+// LMUL represents the fact that the types of values used by that builtin are
+// values generated by instructions that are executed under that LMUL. However,
+// this does not mean the builtin is necessarily lowered into an instruction
+// that executes under the specified LMUL. An example where this happens is
+// loads and stores of masks. A mask like `vbool8_t` can be generated, for
+// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or by comparing
+// two `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however,
+// will be performed under LMUL=1 because mask registers are not grouped.
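+//
+// As an illustrative aside (not normative): the ELMUL encoding corresponds to
+// LMUL as follows -- ELMUL 3, 2, 1, 0, -1, -2, -3 stand for LMUL 8, 4, 2, 1,
+// 1/2, 1/4 and 1/8 respectively. So a TypeRange entry "c" (int8_t)
+// instantiated under ELMUL=3 (LMUL=8) yields the type __rvv_int8m8_t, while
+// under ELMUL=-3 (LMUL=1/8) it yields __rvv_int8mf8_t.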
+//
+// TypeRange is a non-empty sequence of basic types:
+//
+//   c: int8_t (i8)
+//   s: int16_t (i16)
+//   i: int32_t (i32)
+//   l: int64_t (i64)
+//   h: float16_t (half)
+//   f: float32_t (float)
+//   d: float64_t (double)
+//
+// This way, given an LMUL, a record with a TypeRange "sil" will cause the
+// definition of 3 builtins. Each type "t" in the TypeRange (in this example
+// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
+// definition of that particular builtin (for the given LMUL).
+//
+// During the instantiation, types can be transformed or modified using type
+// transformers. Given a type "t" the following primitive type transformers
+// can be applied to it to yield another type.
+//
+//   e: type of "t" as is (identity)
+//   v: computes a vector type whose element type is "t" for the current LMUL
+//   w: computes a vector type identical to what 'v' computes except for the
+//      element type which is twice as wide as the element type of 'v'
+//   q: computes a vector type identical to what 'v' computes except for the
+//      element type which is four times as wide as the element type of 'v'
+//   o: computes a vector type identical to what 'v' computes except for the
+//      element type which is eight times as wide as the element type of 'v'
+//   m: computes a vector type identical to what 'v' computes except for the
+//      element type which is bool
+//   0: void type, ignores "t"
+//   z: size_t, ignores "t"
+//   t: ptrdiff_t, ignores "t"
+//   c: uint8_t, ignores "t"
+//
+// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
+// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
+// Accordingly "w" would yield __rvv_int64m2_t.
+//
+// A type transformer can be prefixed by other non-primitive type transformers.
+//
+//   P: constructs a pointer to the current type
+//   C: adds const to the type
+//   K: requires the integer type to be a constant expression
+//   U: given an integer type or vector type, computes its unsigned variant
+//   I: given a vector type, compute the vector type with integer type
+//      elements of the same width
+//   F: given a vector type, compute the vector type with floating-point type
+//      elements of the same width
+//   W: widens an integer or float type. Cannot be used on vectors
+//   S: given a vector type, computes its equivalent one for LMUL=1. This is a
+//      no-op if the vector was already LMUL=1
+//
+// Following the example above, if t is "i", then "Ue" will yield unsigned int
+// and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), "Fw" would
+// yield __rvv_float64m2_t, etc.
+//
+// Each builtin is then defined by applying each type in TypeRange against the
+// sequence of type transformers described in Suffix and Prototype.
+//
+// The name of the builtin is defined by the Name attribute (which defaults to
+// the name of the class) with the instantiated Suffix attribute appended
+// (separated by an underscore). For instance with Name="foo", Suffix = "v"
+// and TypeRange = "il", the builtins generated will be __builtin_rvv_foo_i32m1
+// and __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
+// type transformer (say "vv"), each of the types is separated with an
+// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
+//
+// The C/C++ prototype of the builtin is defined by the Prototype attribute.
+// Prototype is a non-empty sequence of type transformers, the first of which
+// is the return type of the builtin and the rest are the parameters of the
+// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si",
+// the first builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
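+//
+// As a sketch of how these pieces combine (hypothetical record, shown for
+// illustration only -- it is not part of this patch), a definition such as
+//
+//   def vfoo_vv : RVVBuiltin<"v", "vvv", "i">
+//
+// would, under LMUL=1, produce a builtin named __builtin_rvv_vfoo_vv_i32m1 of
+// type __rvv_int32m1_t (__rvv_int32m1_t, __rvv_int32m1_t); the HasVL and
+// HasMask defaults described below additionally append a size_t vector-length
+// parameter and generate a masked variant, which is how entries such as
+// vadd_vv_i32m1_vl and vadd_vv_i32m1_m_vl in BuiltinsRISCV.def arise.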
For instance if Prototype is "wvv" and TypeRange is "si" +// a first builtin will have type +// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin +// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again +// under LMUL=1). +// +// There are a number of attributes that are used to constraint the number and +// shape of the builtins generated. Refer to the comments below for them. +class RVVBuiltin { + // Base name that will be prepended in __builtin_rvv_ and appended the + // computed Suffix. + string Name = NAME; + + // If not empty, each instantiated builtin will have this appended after an + // underscore (_). It is instantiated like Prototype. + string Suffix = suffix; + + // If not empty, each instantiated mangled builtin will have this appended + // after an underscore (_). It is instantiated like Prototype. + string MangledSuffix = managed_suffix; + + // The different variants of the builtin, parameterised with a type. + string TypeRange = type_range; + + // We use each type described in TypeRange and LMUL with prototype to + // instantiate a specific element of the set of builtins being defined. + // Prototype attribute defines the C/C++ prototype of the builtin. It is a + // non-empty sequence of type transformers, the first of which is the return + // type of the builtin and the rest are the parameters of the builtin, in + // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a + // first builtin will have type + // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin + // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t). + string Prototype = prototype; + + // This builtin has a masked form. + bit HasMask = true; + + // If HasMask, this flag states that this builtin has a merge operand. It is + // always the first operand. + bit HasMergeOperand = true; + + // This builtin has a granted vector length parameter in the last position. + bit HasVL = true; + + // This builtin supports function overloading and has a mangled name. + bit HasGeneric = true; + + // Reads or writes "memory" or has other side-effects. + bit HasSideEffects = false; + + // This builtin is valid for the given exponental LMULs. + list ELMUL = [0, 1, 2, 3, -1, -2, -3]; + + // Emit the automatic clang codegen. It describes what types we have to use + // to obtain the specific LLVM intrinsic. -1 means the return type, otherwise, + // k >= 0 meaning the k-th operand (counting from zero) of the codegen'd + // parameter of the unmasked version. k can't be the mask operand's position. + list IntrinsicTypes = []; + + // If these names are not empty, this is the ID of the LLVM intrinsic + // we want to lower to. + string IRName = NAME; + + // If HasMask, this is the ID of the LLVM intrinsic we want to lower to. + string IRNameMask = NAME #"_mask"; +} + +//===----------------------------------------------------------------------===// +// Basic classes with automatic codegen. +//===----------------------------------------------------------------------===// +class RVVBinBuiltin + : RVVBuiltin { + let IntrinsicTypes = [-1, 1]; +} + +multiclass RVVBinBuiltinSet> suffixes_prototypes> { + let IRName = intrinsic_name, IRNameMask = intrinsic_name #"_mask" in { + foreach s_p = suffixes_prototypes in { + let Name = NAME #"_" # s_p[0] in { + defvar suffix = s_p[1]; + defvar prototype = s_p[2]; + + def : RVVBinBuiltin; + } + } + } +} + +// `ALL` macro should be defined in all gen-riscv-* targets except +// gen-riscv-vector-test. 
+
+// The `ALL` macro should be defined in all gen-riscv-* targets except
+// gen-riscv-vector-test. gen-rvv-tests.py will define each macro in turn to
+// generate each intrinsic's tests in a different file. This means that adding
+// a new definition also requires updating op_list in gen-rvv-tests.py.
+#ifdef ALL
+#define VADD
+#define VFADD
+#endif
+
+// 12. Vector Integer Arithmetic Instructions
+// 12.1. Vector Single-Width Integer Add and Subtract
+#ifdef VADD
+defm vadd : RVVBinBuiltinSet<"vadd", "csil",
+                             [["vv", "v", "vvv"],
+                              ["vx", "v", "vve"],
+                              ["vv", "Uv", "UvUvUv"],
+                              ["vx", "Uv", "UvUvUe"]]>;
+#endif
+
+// 14. Vector Floating-Point Instructions
+// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+#ifdef VFADD
+defm vfadd : RVVBinBuiltinSet<"vfadd", "fd",
+                              [["vv", "v", "vvv"],
+                               ["vf", "v", "vve"]]>;
+#endif
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -17607,196 +17607,7 @@
   // Required for overloaded intrinsics.
   llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
   switch (BuiltinID) {
-  // We could generate all the possible combinations and handling code in
-  // a file and include it here, instead of listing all the builtins plainly.
-  // Something like
-  // #include clang/Basic/RISCVVBuiltinCodeGen.inc
-  case RISCV::BIvadd_vv_i8m1_vl:
-  case RISCV::BIvadd_vv_i16m1_vl:
-  case RISCV::BIvadd_vv_i32m1_vl:
-  case RISCV::BIvadd_vv_i64m1_vl:
-  case RISCV::BIvadd_vv_i8m2_vl:
-  case RISCV::BIvadd_vv_i16m2_vl:
-  case RISCV::BIvadd_vv_i32m2_vl:
-  case RISCV::BIvadd_vv_i64m2_vl:
-  case RISCV::BIvadd_vv_i8m4_vl:
-  case RISCV::BIvadd_vv_i16m4_vl:
-  case RISCV::BIvadd_vv_i32m4_vl:
-  case RISCV::BIvadd_vv_i64m4_vl:
-  case RISCV::BIvadd_vv_i8m8_vl:
-  case RISCV::BIvadd_vv_i16m8_vl:
-  case RISCV::BIvadd_vv_i32m8_vl:
-  case RISCV::BIvadd_vv_i64m8_vl:
-  case RISCV::BIvadd_vv_i8mf2_vl:
-  case RISCV::BIvadd_vv_i16mf2_vl:
-  case RISCV::BIvadd_vv_i32mf2_vl:
-  case RISCV::BIvadd_vv_i8mf4_vl:
-  case RISCV::BIvadd_vv_i16mf4_vl:
-  case RISCV::BIvadd_vv_i8mf8_vl:
-  case RISCV::BIvadd_vx_i8m1_vl:
-  case RISCV::BIvadd_vx_i16m1_vl:
-  case RISCV::BIvadd_vx_i32m1_vl:
-  case RISCV::BIvadd_vx_i64m1_vl:
-  case RISCV::BIvadd_vx_i8m2_vl:
-  case RISCV::BIvadd_vx_i16m2_vl:
-  case RISCV::BIvadd_vx_i32m2_vl:
-  case RISCV::BIvadd_vx_i64m2_vl:
-  case RISCV::BIvadd_vx_i8m4_vl:
-  case RISCV::BIvadd_vx_i16m4_vl:
-  case RISCV::BIvadd_vx_i32m4_vl:
-  case RISCV::BIvadd_vx_i64m4_vl:
-  case RISCV::BIvadd_vx_i8m8_vl:
-  case RISCV::BIvadd_vx_i16m8_vl:
-  case RISCV::BIvadd_vx_i32m8_vl:
-  case RISCV::BIvadd_vx_i64m8_vl:
-  case RISCV::BIvadd_vx_i8mf2_vl:
-  case RISCV::BIvadd_vx_i16mf2_vl:
-  case RISCV::BIvadd_vx_i32mf2_vl:
-  case RISCV::BIvadd_vx_i8mf4_vl:
-  case RISCV::BIvadd_vx_i16mf4_vl:
-  case RISCV::BIvadd_vx_i8mf8_vl:
-  case RISCV::BIvadd_vv_u8m1_vl:
-  case RISCV::BIvadd_vv_u16m1_vl:
-  case RISCV::BIvadd_vv_u32m1_vl:
-  case RISCV::BIvadd_vv_u64m1_vl:
-  case RISCV::BIvadd_vv_u8m2_vl:
-  case RISCV::BIvadd_vv_u16m2_vl:
-  case RISCV::BIvadd_vv_u32m2_vl:
-  case RISCV::BIvadd_vv_u64m2_vl:
-  case RISCV::BIvadd_vv_u8m4_vl:
-  case RISCV::BIvadd_vv_u16m4_vl:
-  case RISCV::BIvadd_vv_u32m4_vl:
-  case RISCV::BIvadd_vv_u64m4_vl:
-  case RISCV::BIvadd_vv_u8m8_vl:
-  case RISCV::BIvadd_vv_u16m8_vl:
-  case RISCV::BIvadd_vv_u32m8_vl:
-  case RISCV::BIvadd_vv_u64m8_vl:
-  case RISCV::BIvadd_vv_u8mf2_vl:
-  case RISCV::BIvadd_vv_u16mf2_vl:
-  case RISCV::BIvadd_vv_u32mf2_vl:
-  case RISCV::BIvadd_vv_u8mf4_vl:
-  case RISCV::BIvadd_vv_u16mf4_vl:
-  case RISCV::BIvadd_vv_u8mf8_vl:
-  case RISCV::BIvadd_vx_u8m1_vl:
-  case RISCV::BIvadd_vx_u16m1_vl:
-  case
RISCV::BIvadd_vx_u32m1_vl: - case RISCV::BIvadd_vx_u64m1_vl: - case RISCV::BIvadd_vx_u8m2_vl: - case RISCV::BIvadd_vx_u16m2_vl: - case RISCV::BIvadd_vx_u32m2_vl: - case RISCV::BIvadd_vx_u64m2_vl: - case RISCV::BIvadd_vx_u8m4_vl: - case RISCV::BIvadd_vx_u16m4_vl: - case RISCV::BIvadd_vx_u32m4_vl: - case RISCV::BIvadd_vx_u64m4_vl: - case RISCV::BIvadd_vx_u8m8_vl: - case RISCV::BIvadd_vx_u16m8_vl: - case RISCV::BIvadd_vx_u32m8_vl: - case RISCV::BIvadd_vx_u64m8_vl: - case RISCV::BIvadd_vx_u8mf2_vl: - case RISCV::BIvadd_vx_u16mf2_vl: - case RISCV::BIvadd_vx_u32mf2_vl: - case RISCV::BIvadd_vx_u8mf4_vl: - case RISCV::BIvadd_vx_u16mf4_vl: - case RISCV::BIvadd_vx_u8mf8_vl: - // The order of operands is (op1, op2, vl). - ID = Intrinsic::riscv_vadd; - IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()}; - break; - case RISCV::BIvadd_vv_i8m1_m_vl: - case RISCV::BIvadd_vv_i16m1_m_vl: - case RISCV::BIvadd_vv_i32m1_m_vl: - case RISCV::BIvadd_vv_i64m1_m_vl: - case RISCV::BIvadd_vv_i8m2_m_vl: - case RISCV::BIvadd_vv_i16m2_m_vl: - case RISCV::BIvadd_vv_i32m2_m_vl: - case RISCV::BIvadd_vv_i64m2_m_vl: - case RISCV::BIvadd_vv_i8m4_m_vl: - case RISCV::BIvadd_vv_i16m4_m_vl: - case RISCV::BIvadd_vv_i32m4_m_vl: - case RISCV::BIvadd_vv_i64m4_m_vl: - case RISCV::BIvadd_vv_i8m8_m_vl: - case RISCV::BIvadd_vv_i16m8_m_vl: - case RISCV::BIvadd_vv_i32m8_m_vl: - case RISCV::BIvadd_vv_i64m8_m_vl: - case RISCV::BIvadd_vv_i8mf2_m_vl: - case RISCV::BIvadd_vv_i16mf2_m_vl: - case RISCV::BIvadd_vv_i32mf2_m_vl: - case RISCV::BIvadd_vv_i8mf4_m_vl: - case RISCV::BIvadd_vv_i16mf4_m_vl: - case RISCV::BIvadd_vv_i8mf8_m_vl: - case RISCV::BIvadd_vx_i8m1_m_vl: - case RISCV::BIvadd_vx_i16m1_m_vl: - case RISCV::BIvadd_vx_i32m1_m_vl: - case RISCV::BIvadd_vx_i64m1_m_vl: - case RISCV::BIvadd_vx_i8m2_m_vl: - case RISCV::BIvadd_vx_i16m2_m_vl: - case RISCV::BIvadd_vx_i32m2_m_vl: - case RISCV::BIvadd_vx_i64m2_m_vl: - case RISCV::BIvadd_vx_i8m4_m_vl: - case RISCV::BIvadd_vx_i16m4_m_vl: - case RISCV::BIvadd_vx_i32m4_m_vl: - case RISCV::BIvadd_vx_i64m4_m_vl: - case RISCV::BIvadd_vx_i8m8_m_vl: - case RISCV::BIvadd_vx_i16m8_m_vl: - case RISCV::BIvadd_vx_i32m8_m_vl: - case RISCV::BIvadd_vx_i64m8_m_vl: - case RISCV::BIvadd_vx_i8mf2_m_vl: - case RISCV::BIvadd_vx_i16mf2_m_vl: - case RISCV::BIvadd_vx_i32mf2_m_vl: - case RISCV::BIvadd_vx_i8mf4_m_vl: - case RISCV::BIvadd_vx_i16mf4_m_vl: - case RISCV::BIvadd_vx_i8mf8_m_vl: - case RISCV::BIvadd_vv_u8m1_m_vl: - case RISCV::BIvadd_vv_u16m1_m_vl: - case RISCV::BIvadd_vv_u32m1_m_vl: - case RISCV::BIvadd_vv_u64m1_m_vl: - case RISCV::BIvadd_vv_u8m2_m_vl: - case RISCV::BIvadd_vv_u16m2_m_vl: - case RISCV::BIvadd_vv_u32m2_m_vl: - case RISCV::BIvadd_vv_u64m2_m_vl: - case RISCV::BIvadd_vv_u8m4_m_vl: - case RISCV::BIvadd_vv_u16m4_m_vl: - case RISCV::BIvadd_vv_u32m4_m_vl: - case RISCV::BIvadd_vv_u64m4_m_vl: - case RISCV::BIvadd_vv_u8m8_m_vl: - case RISCV::BIvadd_vv_u16m8_m_vl: - case RISCV::BIvadd_vv_u32m8_m_vl: - case RISCV::BIvadd_vv_u64m8_m_vl: - case RISCV::BIvadd_vv_u8mf2_m_vl: - case RISCV::BIvadd_vv_u16mf2_m_vl: - case RISCV::BIvadd_vv_u32mf2_m_vl: - case RISCV::BIvadd_vv_u8mf4_m_vl: - case RISCV::BIvadd_vv_u16mf4_m_vl: - case RISCV::BIvadd_vv_u8mf8_m_vl: - case RISCV::BIvadd_vx_u8m1_m_vl: - case RISCV::BIvadd_vx_u16m1_m_vl: - case RISCV::BIvadd_vx_u32m1_m_vl: - case RISCV::BIvadd_vx_u64m1_m_vl: - case RISCV::BIvadd_vx_u8m2_m_vl: - case RISCV::BIvadd_vx_u16m2_m_vl: - case RISCV::BIvadd_vx_u32m2_m_vl: - case RISCV::BIvadd_vx_u64m2_m_vl: - case RISCV::BIvadd_vx_u8m4_m_vl: - case 
RISCV::BIvadd_vx_u16m4_m_vl: - case RISCV::BIvadd_vx_u32m4_m_vl: - case RISCV::BIvadd_vx_u64m4_m_vl: - case RISCV::BIvadd_vx_u8m8_m_vl: - case RISCV::BIvadd_vx_u16m8_m_vl: - case RISCV::BIvadd_vx_u32m8_m_vl: - case RISCV::BIvadd_vx_u64m8_m_vl: - case RISCV::BIvadd_vx_u8mf2_m_vl: - case RISCV::BIvadd_vx_u16mf2_m_vl: - case RISCV::BIvadd_vx_u32mf2_m_vl: - case RISCV::BIvadd_vx_u8mf4_m_vl: - case RISCV::BIvadd_vx_u16mf4_m_vl: - case RISCV::BIvadd_vx_u8mf8_m_vl: - ID = Intrinsic::riscv_vadd_mask; - // The order of operands is (mask, maskedoff, op1, op2, vl). - IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[4]->getType()}; - // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl). - std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); - break; +#include "clang/Basic/riscv_vector_builtin_cg.inc" } assert(ID != Intrinsic::not_intrinsic); diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -179,14 +179,18 @@ set(out_files ${out_files} PARENT_SCOPE) endfunction(copy_header_to_output_dir) -function(clang_generate_header td_option td_file out_file) - clang_tablegen(${out_file} ${td_option} +function(clang_generate_header) + # Syntax: + # clang_generate_header [tablegen-arg ...] SOURCE td_file OUTPUT output-file + cmake_parse_arguments(td "" "SOURCE;OUTPUT" "" ${ARGN}) + + clang_tablegen(${td_OUTPUT} ${td_UNPARSED_ARGUMENTS} -I ${CLANG_SOURCE_DIR}/include/clang/Basic/ - SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/${td_file}) + SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/${td_SOURCE}) - copy_header_to_output_dir(${CMAKE_CURRENT_BINARY_DIR} ${out_file}) + copy_header_to_output_dir(${CMAKE_CURRENT_BINARY_DIR} ${td_OUTPUT}) set(out_files ${out_files} PARENT_SCOPE) - list(APPEND generated_files "${CMAKE_CURRENT_BINARY_DIR}/${out_file}") + list(APPEND generated_files "${CMAKE_CURRENT_BINARY_DIR}/${td_OUTPUT}") set(generated_files ${generated_files} PARENT_SCOPE) endfunction(clang_generate_header) @@ -198,17 +202,37 @@ # Generate header files and copy them to the build directory # Generate arm_neon.h -clang_generate_header(-gen-arm-neon arm_neon.td arm_neon.h) +clang_generate_header(-gen-arm-neon + SOURCE arm_neon.td + OUTPUT arm_neon.h) # Generate arm_fp16.h -clang_generate_header(-gen-arm-fp16 arm_fp16.td arm_fp16.h) +clang_generate_header(-gen-arm-fp16 + SOURCE arm_fp16.td + OUTPUT arm_fp16.h) # Generate arm_sve.h -clang_generate_header(-gen-arm-sve-header arm_sve.td arm_sve.h) +clang_generate_header(-gen-arm-sve-header + SOURCE arm_sve.td + OUTPUT arm_sve.h) # Generate arm_bf16.h -clang_generate_header(-gen-arm-bf16 arm_bf16.td arm_bf16.h) +clang_generate_header(-gen-arm-bf16 + SOURCE arm_bf16.td + OUTPUT arm_bf16.h) # Generate arm_mve.h -clang_generate_header(-gen-arm-mve-header arm_mve.td arm_mve.h) +clang_generate_header(-gen-arm-mve-header + SOURCE arm_mve.td + OUTPUT arm_mve.h) # Generate arm_cde.h -clang_generate_header(-gen-arm-cde-header arm_cde.td arm_cde.h) +clang_generate_header(-gen-arm-cde-header + SOURCE arm_cde.td + OUTPUT arm_cde.h) +# Generate riscv_vector.h +clang_generate_header(-gen-riscv-vector-header -D=ALL + SOURCE riscv_vector.td + OUTPUT riscv_vector.h) +# Generate riscv_vector_generic.h +clang_generate_header(-gen-riscv-vector-generic-header -D=ALL + SOURCE riscv_vector.td + OUTPUT riscv_vector_generic.h) add_custom_target(clang-resource-headers ALL DEPENDS ${out_files}) set_target_properties(clang-resource-headers PROPERTIES diff --git 
a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vadd.c @@ -0,0 +1,2496 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t +// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t + +// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D=' + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1_vl(vint8m1_t arg_1, vint8m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, vint8m1_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_vl(vint8m2_t arg_1, vint8m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vadd_vv_i8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, vint8m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_vl(vint8m4_t arg_1, vint8m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, vint8m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_vl(vint8m8_t arg_1, vint8m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, vint8m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_vl(vint8mf2_t arg_1, vint8mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, vint8mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_vl(vint8mf4_t arg_1, vint8mf4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, vint8mf4_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_vl(vint8mf8_t arg_1, vint8mf8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, vint8mf8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_vl(vint16m1_t arg_1, vint16m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, vint16m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_vl(vint16m2_t arg_1, vint16m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, vint16m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_vl(vint16m4_t 
arg_1, vint16m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, vint16m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_vl(vint16m8_t arg_1, vint16m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, vint16m8_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_vl(vint16mf2_t arg_1, vint16mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, 
vint16mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_vl(vint16mf4_t arg_1, vint16mf4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, vint16mf4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_vl(vint32m1_t arg_1, vint32m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, vint32m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_vl(vint32m2_t arg_1, vint32m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: 
@test_vadd_vv_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, vint32m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_vl(vint32m4_t arg_1, vint32m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, vint32m4_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_vl(vint32m8_t arg_1, vint32m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, vint32m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: 
@test_vadd_vv_i32mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2_vl(vint32mf2_t arg_1, vint32mf2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, vint32mf2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1_vl(vint64m1_t arg_1, vint64m1_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, vint64m1_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2_vl(vint64m2_t arg_1, vint64m2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, vint64m2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4_vl(vint64m4_t arg_1, vint64m4_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, vint64m4_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8_vl(vint64m8_t arg_1, vint64m8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, vint64m8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1_vl(vint8m1_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2_vl(vint8m2_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4_vl(vint8m4_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8_vl(vint8m8_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2_vl(vint8mf2_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4_vl(vint8mf4_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8_vl(vint8mf8_t arg_1, int8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, int8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1_vl(vint16m1_t arg_1, int16_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, int16_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2_vl(vint16m2_t arg_1, int16_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, int16_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4_vl(vint16m4_t arg_1, int16_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, int16_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8_vl(vint16m8_t arg_1, int16_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, int16_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2_vl(vint16mf2_t arg_1, int16_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, int16_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4_vl(vint16mf4_t arg_1, int16_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, int16_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1_vl(vint32m1_t arg_1, int32_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, int32_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2_vl(vint32m2_t arg_1, int32_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, int32_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4_vl(vint32m4_t arg_1, int32_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, int32_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8_vl(vint32m8_t arg_1, int32_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, int32_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2_vl(vint32mf2_t arg_1, int32_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, int32_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1_vl(vint64m1_t arg_1, int64_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, int64_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2_vl(vint64m2_t arg_1, int64_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, int64_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4_vl(vint64m4_t arg_1, int64_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, int64_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8_vl(vint64m8_t arg_1, int64_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, int64_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1_vl(vuint8m1_t arg_1, vuint8m1_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, vuint8m1_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2_vl(vuint8m2_t arg_1, vuint8m2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, vuint8m2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4_vl(vuint8m4_t arg_1, vuint8m4_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, vuint8m4_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8_vl(vuint8m8_t arg_1, vuint8m8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, vuint8m8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2_vl(vuint8mf2_t arg_1, vuint8mf2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, vuint8mf2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4_vl(vuint8mf4_t arg_1, vuint8mf4_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, vuint8mf4_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8_vl(vuint8mf8_t arg_1, vuint8mf8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, vuint8mf8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1_vl(vuint16m1_t arg_1, vuint16m1_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, vuint16m1_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2_vl(vuint16m2_t arg_1, vuint16m2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, vuint16m2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4_vl(vuint16m4_t arg_1, vuint16m4_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, vuint16m4_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8_vl(vuint16m8_t arg_1, vuint16m8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, vuint16m8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2_vl(vuint16mf2_t arg_1, vuint16mf2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, vuint16mf2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4_vl(vuint16mf4_t arg_1, vuint16mf4_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, vuint16mf4_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1_vl(vuint32m1_t arg_1, vuint32m1_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, vuint32m1_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2_vl(vuint32m2_t arg_1, vuint32m2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, vuint32m2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4_vl(vuint32m4_t arg_1, vuint32m4_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, vuint32m4_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8_vl(vuint32m8_t arg_1, vuint32m8_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, vuint32m8_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2_vl(vuint32mf2_t arg_1, vuint32mf2_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, vuint32mf2_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1_vl(vuint64m1_t arg_1, vuint64m1_t arg_2, size_t arg_3) {
+ return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, vuint64m1_t arg_4, size_t arg_5) {
+ return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
[[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_vl(vuint64m2_t arg_1, vuint64m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, vuint64m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_vl(vuint64m4_t arg_1, vuint64m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, vuint64m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_vl(vuint64m8_t arg_1, vuint64m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vadd_vv_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, vuint64m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_vl(vuint8m1_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_vl(vuint8m2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_vl(vuint8m4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_vl(vuint8m8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_vl(vuint8mf2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_vl(vuint8mf4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_vl(vuint8mf8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_vl(vuint16m1_t arg_1, uint16_t arg_2, size_t arg_3) { + return
vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_vl(vuint16m2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_vl(vuint16m4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3,
arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_vl(vuint16m8_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_vl(vuint16mf2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_vl(vuint16mf4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_vl(vuint32m1_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_vl(vuint32m2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]]
= call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_vl(vuint32m4_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_vl(vuint32m8_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_vl(vuint32mf2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_vl(vuint64m1_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_vl(vuint64m2_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +//
CHECK-RV64-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_vl(vuint64m4_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_vl(vuint64m8_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vfadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vfadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vfadd.c @@ -0,0 +1,518 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t +// 
RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t + +// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D=' + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_vl(vfloat32m1_t arg_1, vfloat32m1_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, vfloat32m1_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_vl(vfloat32m2_t arg_1, vfloat32m2_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, vfloat32m2_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]]
+// +vfloat32m4_t test_vfadd_vv_f32m4_vl(vfloat32m4_t arg_1, vfloat32m4_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, vfloat32m4_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_vl(vfloat32m8_t arg_1, vfloat32m8_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, vfloat32m8_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_vl(vfloat32mf2_t arg_1, vfloat32mf2_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, vfloat32mf2_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_vl(vfloat64m1_t arg_1, vfloat64m1_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, vfloat64m1_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_vl(vfloat64m2_t arg_1, vfloat64m2_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, vfloat64m2_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t 
test_vfadd_vv_f64m4_vl(vfloat64m4_t arg_1, vfloat64m4_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, vfloat64m4_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_vl(vfloat64m8_t arg_1, vfloat64m8_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, vfloat64m8_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_vl(vfloat32m1_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfadd_vf_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_vl(vfloat32m2_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_vl(vfloat32m4_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfadd_vf_f32m8_vl(vfloat32m8_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_vl(vfloat32mf2_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_vl(vfloat64m1_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat64m1_t test_vfadd_vf_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_vl(vfloat64m2_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_vl(vfloat64m4_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t 
+vfloat64m8_t test_vfadd_vf_f64m8_vl(vfloat64m8_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ARG_2:%.*]], <vscale x 8 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ARG_2:%.*]], <vscale x 8 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vadd.c
@@ -0,0 +1,2478 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D='
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i32 [[ARG_3:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_vl(vint8m1_t arg_1, vint8m1_t arg_2, size_t arg_3) {
+  return vadd_vv_i8m1_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i8> [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i8> [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, vint8m1_t arg_4, size_t arg_5) {
+  return vadd_vv_i8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_vl(vint8m2_t arg_1, vint8m2_t arg_2, size_t arg_3) { + return vadd_vv_i8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, vint8m2_t arg_4, size_t arg_5) { + return vadd_vv_i8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_vl(vint8m4_t arg_1, vint8m4_t arg_2, size_t arg_3) { + return vadd_vv_i8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, vint8m4_t arg_4, size_t arg_5) { + return vadd_vv_i8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_vl(vint8m8_t arg_1, vint8m8_t arg_2, size_t arg_3) { + return vadd_vv_i8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m_vl( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, vint8m8_t arg_4, size_t arg_5) { + return vadd_vv_i8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_vl(vint8mf2_t arg_1, vint8mf2_t arg_2, size_t arg_3) { + return vadd_vv_i8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, vint8mf2_t arg_4, size_t arg_5) { + return vadd_vv_i8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_vl(vint8mf4_t arg_1, vint8mf4_t arg_2, size_t arg_3) { + return vadd_vv_i8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, vint8mf4_t arg_4, size_t arg_5) { + return vadd_vv_i8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 
[[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_vl(vint8mf8_t arg_1, vint8mf8_t arg_2, size_t arg_3) { + return vadd_vv_i8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, vint8mf8_t arg_4, size_t arg_5) { + return vadd_vv_i8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_vl(vint16m1_t arg_1, vint16m1_t arg_2, size_t arg_3) { + return vadd_vv_i16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, vint16m1_t arg_4, size_t arg_5) { + return vadd_vv_i16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_vl(vint16m2_t arg_1, vint16m2_t arg_2, size_t arg_3) { + return vadd_vv_i16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t 
arg_3, vint16m2_t arg_4, size_t arg_5) { + return vadd_vv_i16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_vl(vint16m4_t arg_1, vint16m4_t arg_2, size_t arg_3) { + return vadd_vv_i16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, vint16m4_t arg_4, size_t arg_5) { + return vadd_vv_i16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_vl(vint16m8_t arg_1, vint16m8_t arg_2, size_t arg_3) { + return vadd_vv_i16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, vint16m8_t arg_4, size_t arg_5) { + return vadd_vv_i16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_vl(vint16mf2_t arg_1, vint16mf2_t arg_2, size_t arg_3) { + return vadd_vv_i16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, vint16mf2_t arg_4, size_t arg_5) { + return vadd_vv_i16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_vl(vint16mf4_t arg_1, vint16mf4_t arg_2, size_t arg_3) { + return vadd_vv_i16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, vint16mf4_t arg_4, size_t arg_5) { + return vadd_vv_i16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_vl(vint32m1_t arg_1, vint32m1_t arg_2, size_t arg_3) { + return vadd_vv_i32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, vint32m1_t arg_4, size_t arg_5) { + return vadd_vv_i32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_vl(vint32m2_t arg_1, vint32m2_t arg_2, size_t arg_3) { + return vadd_vv_i32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, vint32m2_t arg_4, size_t arg_5) { + return vadd_vv_i32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_vl(vint32m4_t arg_1, vint32m4_t arg_2, size_t arg_3) { + return vadd_vv_i32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, vint32m4_t arg_4, size_t arg_5) { + return vadd_vv_i32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_vl(vint32m8_t arg_1, vint32m8_t arg_2, size_t arg_3) { + return vadd_vv_i32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vadd_vv_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, vint32m8_t arg_4, size_t arg_5) { + return vadd_vv_i32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_vl(vint32mf2_t arg_1, vint32mf2_t arg_2, size_t arg_3) { + return vadd_vv_i32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, vint32mf2_t arg_4, size_t arg_5) { + return vadd_vv_i32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_vl(vint64m1_t arg_1, vint64m1_t arg_2, size_t arg_3) { + return vadd_vv_i64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, vint64m1_t arg_4, size_t arg_5) { + return vadd_vv_i64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_vl(vint64m2_t arg_1, vint64m2_t arg_2, size_t arg_3) { + return vadd_vv_i64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, vint64m2_t arg_4, size_t arg_5) { + return vadd_vv_i64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_vl(vint64m4_t arg_1, vint64m4_t arg_2, size_t arg_3) { + return vadd_vv_i64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, vint64m4_t arg_4, size_t arg_5) { + return vadd_vv_i64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_vl(vint64m8_t arg_1, vint64m8_t arg_2, size_t arg_3) { + return vadd_vv_i64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m8_t test_vadd_vv_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, vint64m8_t arg_4, size_t arg_5) { + return vadd_vv_i64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_vl(vint8m1_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_vl(vint8m2_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_vl(vint8m4_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_vl(vint8m8_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_vl(vint8mf2_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_vl(vint8mf4_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_vl(vint8mf8_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_vl(vint16m1_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 
[[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_vl(vint16m2_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_vl(vint16m4_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_vl(vint16m8_t arg_1, int16_t arg_2, size_t 
arg_3) { + return vadd_vx_i16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_vl(vint16mf2_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_vl(vint16mf4_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// 
CHECK-RV32-LABEL: @test_vadd_vx_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_vl(vint32m1_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_vl(vint32m2_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_vl(vint32m4_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 
[[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_vl(vint32m8_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_vl(vint32mf2_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_vl( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_vl(vint64m1_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_vl(vint64m2_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_vl(vint64m4_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 
[[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_vl(vint64m8_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_vl(vuint8m1_t arg_1, vuint8m1_t arg_2, size_t arg_3) { + return vadd_vv_u8m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, vuint8m1_t arg_4, size_t arg_5) { + return vadd_vv_u8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_vl(vuint8m2_t arg_1, vuint8m2_t arg_2, size_t arg_3) { + return vadd_vv_u8m2_vl(arg_1, arg_2, 
arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, vuint8m2_t arg_4, size_t arg_5) { + return vadd_vv_u8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_vl(vuint8m4_t arg_1, vuint8m4_t arg_2, size_t arg_3) { + return vadd_vv_u8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, vuint8m4_t arg_4, size_t arg_5) { + return vadd_vv_u8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_vl(vuint8m8_t arg_1, vuint8m8_t arg_2, size_t arg_3) { + return vadd_vv_u8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, vuint8m8_t arg_4, size_t arg_5) { + return vadd_vv_u8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_vl(vuint8mf2_t arg_1, vuint8mf2_t arg_2, size_t arg_3) { + return vadd_vv_u8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, vuint8mf2_t arg_4, size_t arg_5) { + return vadd_vv_u8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_vl(vuint8mf4_t arg_1, vuint8mf4_t arg_2, size_t arg_3) { + return vadd_vv_u8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, vuint8mf4_t arg_4, size_t arg_5) { + return vadd_vv_u8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_vl(vuint8mf8_t arg_1, vuint8mf8_t arg_2, size_t arg_3) { + return vadd_vv_u8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, vuint8mf8_t arg_4, size_t arg_5) { + return vadd_vv_u8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_vl(vuint16m1_t arg_1, vuint16m1_t arg_2, size_t arg_3) { + return vadd_vv_u16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, vuint16m1_t arg_4, size_t arg_5) { + return vadd_vv_u16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_vl(vuint16m2_t arg_1, vuint16m2_t arg_2, size_t arg_3) { + return vadd_vv_u16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, vuint16m2_t arg_4, size_t arg_5) { + return vadd_vv_u16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_vl(vuint16m4_t arg_1, vuint16m4_t arg_2, size_t arg_3) { + return vadd_vv_u16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, vuint16m4_t arg_4, size_t arg_5) { + return vadd_vv_u16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_vl(vuint16m8_t arg_1, vuint16m8_t arg_2, size_t arg_3) { + return vadd_vv_u16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, vuint16m8_t arg_4, size_t arg_5) { + return vadd_vv_u16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_vl(vuint16mf2_t arg_1, vuint16mf2_t arg_2, size_t arg_3) { + return vadd_vv_u16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 
[[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, vuint16mf2_t arg_4, size_t arg_5) { + return vadd_vv_u16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_vl(vuint16mf4_t arg_1, vuint16mf4_t arg_2, size_t arg_3) { + return vadd_vv_u16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, vuint16mf4_t arg_4, size_t arg_5) { + return vadd_vv_u16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_vl(vuint32m1_t arg_1, vuint32m1_t arg_2, size_t arg_3) { + return vadd_vv_u32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, vuint32m1_t arg_4, size_t arg_5) { + return vadd_vv_u32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_vl(vuint32m2_t arg_1, 
vuint32m2_t arg_2, size_t arg_3) { + return vadd_vv_u32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, vuint32m2_t arg_4, size_t arg_5) { + return vadd_vv_u32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_vl(vuint32m4_t arg_1, vuint32m4_t arg_2, size_t arg_3) { + return vadd_vv_u32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, vuint32m4_t arg_4, size_t arg_5) { + return vadd_vv_u32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_vl(vuint32m8_t arg_1, vuint32m8_t arg_2, size_t arg_3) { + return vadd_vv_u32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, vuint32m8_t arg_4, size_t arg_5) { + return vadd_vv_u32m8_m_vl(arg_1, 
arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_vl(vuint32mf2_t arg_1, vuint32mf2_t arg_2, size_t arg_3) { + return vadd_vv_u32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, vuint32mf2_t arg_4, size_t arg_5) { + return vadd_vv_u32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_vl(vuint64m1_t arg_1, vuint64m1_t arg_2, size_t arg_3) { + return vadd_vv_u64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, vuint64m1_t arg_4, size_t arg_5) { + return vadd_vv_u64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_vl(vuint64m2_t arg_1, vuint64m2_t arg_2, size_t arg_3) { + return vadd_vv_u64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, vuint64m2_t arg_4, size_t arg_5) { + return vadd_vv_u64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_vl(vuint64m4_t arg_1, vuint64m4_t arg_2, size_t arg_3) { + return vadd_vv_u64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, vuint64m4_t arg_4, size_t arg_5) { + return vadd_vv_u64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_vl(vuint64m8_t arg_1, vuint64m8_t arg_2, size_t arg_3) { + return vadd_vv_u64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, vuint64m8_t arg_4, size_t arg_5) { + return vadd_vv_u64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], 
i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_vl(vuint8m1_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_vl(vuint8m2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_vl(vuint8m4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], 
[[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_vl(vuint8m8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_vl(vuint8mf2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_vl(vuint8mf4_t arg_1, uint8_t arg_2, size_t arg_3) { + return 
vadd_vx_u8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_vl(vuint8mf8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_vl(vuint16m1_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_vl( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_vl(vuint16m2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_vl(vuint16m4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_vl(vuint16m8_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_vl(vuint16mf2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_vl(vuint16mf4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_vl( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_vl(vuint32m1_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_vl(vuint32m2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_vl(vuint32m4_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], 
[[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_vl(vuint32m8_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_vl(vuint32mf2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_vl(vuint64m1_t arg_1, uint64_t 
arg_2, size_t arg_3) { + return vadd_vx_u64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_vl(vuint64m2_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vx_u64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_vl(vuint64m4_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vx_u64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + 
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_vl(vuint64m8_t arg_1, uint64_t arg_2, size_t arg_3) {
+  return vadd_vx_u64m8_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], i64 [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], i64 [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, uint64_t arg_4, size_t arg_5) {
+  return vadd_vx_u64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vfadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vfadd.c
@@ -0,0 +1,518 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D='
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ARG_1:%.*]], <vscale x 2 x float> [[ARG_2:%.*]], i32 [[ARG_3:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ARG_1:%.*]], <vscale x 2 x float> [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_vl(vfloat32m1_t arg_1, vfloat32m1_t arg_2, size_t arg_3) {
+  return vfadd_vv_f32m1_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ARG_2:%.*]], <vscale x 2 x float> [[ARG_3:%.*]], <vscale x 2 x float> [[ARG_4:%.*]], <vscale x 2 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m_vl(
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, vfloat32m1_t arg_4, size_t arg_5) { + return vfadd_vv_f32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_vl(vfloat32m2_t arg_1, vfloat32m2_t arg_2, size_t arg_3) { + return vfadd_vv_f32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, vfloat32m2_t arg_4, size_t arg_5) { + return vfadd_vv_f32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_vl(vfloat32m4_t arg_1, vfloat32m4_t arg_2, size_t arg_3) { + return vfadd_vv_f32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, vfloat32m4_t arg_4, size_t arg_5) { + return vfadd_vv_f32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_vl(vfloat32m8_t arg_1, vfloat32m8_t arg_2, size_t arg_3) { + return vfadd_vv_f32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, vfloat32m8_t arg_4, size_t arg_5) { + return vfadd_vv_f32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_vl(vfloat32mf2_t arg_1, vfloat32mf2_t arg_2, size_t arg_3) { + return vfadd_vv_f32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, vfloat32mf2_t arg_4, size_t arg_5) { + return vfadd_vv_f32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_vl(vfloat64m1_t arg_1, vfloat64m1_t arg_2, size_t arg_3) { + return vfadd_vv_f64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[ARG_2:%.*]], 
[[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, vfloat64m1_t arg_4, size_t arg_5) { + return vfadd_vv_f64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_vl(vfloat64m2_t arg_1, vfloat64m2_t arg_2, size_t arg_3) { + return vfadd_vv_f64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, vfloat64m2_t arg_4, size_t arg_5) { + return vfadd_vv_f64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_vl(vfloat64m4_t arg_1, vfloat64m4_t arg_2, size_t arg_3) { + return vfadd_vv_f64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, vfloat64m4_t arg_4, size_t arg_5) { + return vfadd_vv_f64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_vl(vfloat64m8_t arg_1, vfloat64m8_t arg_2, size_t arg_3) { + return vfadd_vv_f64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, vfloat64m8_t arg_4, size_t arg_5) { + return vfadd_vv_f64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_vl(vfloat32m1_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_vl(vfloat32m2_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfadd_vf_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_vl(vfloat32m4_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_vl(vfloat32m8_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_vl(vfloat32mf2_t arg_1, float32_t arg_2, size_t arg_3) 
{ + return vfadd_vf_f32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_vl(vfloat64m1_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vf_f64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_vf_f64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_vl(vfloat64m2_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vf_f64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, float64_t arg_4, size_t arg_5) { + 
return vfadd_vf_f64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i32(<vscale x 4 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_vl(vfloat64m4_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vf_f64m4_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ARG_2:%.*]], <vscale x 4 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 4 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ARG_2:%.*]], <vscale x 4 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 4 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_vf_f64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i32(<vscale x 8 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_vl(vfloat64m8_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vf_f64m8_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ARG_2:%.*]], <vscale x 8 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ARG_2:%.*]], <vscale x 8 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_vf_f64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
diff --git a/clang/test/CodeGen/RISCV/vadd.c b/clang/test/CodeGen/RISCV/vadd.c
deleted file mode 100644
--- a/clang/test/CodeGen/RISCV/vadd.c
+++ /dev/null
@@ -1,2648 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
-// RUN:   -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV64-O2 %s
-// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v \
-// RUN:   -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV32-O2 %s
-
-#include <stddef.h>
-#include <stdint.h>
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail
call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vv_i8m1_vl(__rvv_int8m1_t arg_0, __rvv_int8m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vv_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, __rvv_int8m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vv_i16m1_vl(__rvv_int16m1_t arg_0, __rvv_int16m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vv_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, __rvv_int16m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vv_i32m1_vl(__rvv_int32m1_t arg_0, __rvv_int32m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m1_vl(arg_0, arg_1, arg_2); -} - -// 
CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vv_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, __rvv_int32m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vv_i64m1_vl(__rvv_int64m1_t arg_0, __rvv_int64m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vv_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, __rvv_int64m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vv_i8m2_vl(__rvv_int8m2_t arg_0, __rvv_int8m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// 
CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vv_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, __rvv_int8m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vv_i16m2_vl(__rvv_int16m2_t arg_0, __rvv_int16m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vv_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, __rvv_int16m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vv_i32m2_vl(__rvv_int32m2_t arg_0, __rvv_int32m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vv_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, __rvv_int32m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: 
@test_vadd_vv_i64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vv_i64m2_vl(__rvv_int64m2_t arg_0, __rvv_int64m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vv_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, __rvv_int64m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vv_i8m4_vl(__rvv_int8m4_t arg_0, __rvv_int8m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vv_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, __rvv_int8m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vv_i16m4_vl(__rvv_int16m4_t arg_0, __rvv_int16m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vv_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, __rvv_int16m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vv_i32m4_vl(__rvv_int32m4_t arg_0, __rvv_int32m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vv_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, __rvv_int32m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vv_i64m4_vl(__rvv_int64m4_t arg_0, __rvv_int64m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vv_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, 
__rvv_int64m4_t arg_2, __rvv_int64m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vv_i8m8_vl(__rvv_int8m8_t arg_0, __rvv_int8m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vv_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, __rvv_int8m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vv_i16m8_vl(__rvv_int16m8_t arg_0, __rvv_int16m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vv_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, __rvv_int16m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vv_i32m8_vl(__rvv_int32m8_t arg_0, __rvv_int32m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vv_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, __rvv_int32m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vv_i64m8_vl(__rvv_int64m8_t arg_0, __rvv_int64m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vv_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, __rvv_int64m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vv_i8mf2_vl(__rvv_int8mf2_t arg_0, __rvv_int8mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) 
-// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vv_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, __rvv_int8mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vv_i16mf2_vl(__rvv_int16mf2_t arg_0, __rvv_int16mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vv_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, __rvv_int16mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vv_i32mf2_vl(__rvv_int32mf2_t arg_0, __rvv_int32mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vv_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, __rvv_int32mf2_t arg_3, size_t arg_4) -{ - return 
__builtin_rvv_vadd_vv_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vv_i8mf4_vl(__rvv_int8mf4_t arg_0, __rvv_int8mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vv_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, __rvv_int8mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vv_i16mf4_vl(__rvv_int16mf4_t arg_0, __rvv_int16mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vv_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, __rvv_int16mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 
[[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vv_i8mf8_vl(__rvv_int8mf8_t arg_0, __rvv_int8mf8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vv_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, __rvv_int8mf8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vx_i8m1_vl(__rvv_int8m1_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vx_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vx_i16m1_vl(__rvv_int16m1_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// 
CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vx_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vx_i32m1_vl(__rvv_int32m1_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vx_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vx_i64m1_vl(__rvv_int64m1_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vx_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vx_i8m2_vl(__rvv_int8m2_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vx_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vx_i16m2_vl(__rvv_int16m2_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vx_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vx_i32m2_vl(__rvv_int32m2_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_m_vl( -// 
CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vx_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vx_i64m2_vl(__rvv_int64m2_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vx_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vx_i8m4_vl(__rvv_int8m4_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vx_i8m4_m_vl(__rvv_bool2_t arg_0, 
__rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vx_i16m4_vl(__rvv_int16m4_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vx_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vx_i32m4_vl(__rvv_int32m4_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vx_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i32( 
[[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vx_i64m4_vl(__rvv_int64m4_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vx_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vx_i8m8_vl(__rvv_int8m8_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vx_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vx_i16m8_vl(__rvv_int16m8_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_m_vl( 
-// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vx_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vx_i32m8_vl(__rvv_int32m8_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vx_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vx_i64m8_vl(__rvv_int64m8_t arg_0, int64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vx_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, int64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: 
[[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vx_i8mf2_vl(__rvv_int8mf2_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vx_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vx_i16mf2_vl(__rvv_int16mf2_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vx_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vx_i32mf2_vl(__rvv_int32mf2_t arg_0, int32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i32mf2_vl(arg_0, arg_1, arg_2); -} - -// 
CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vx_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, int32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vx_i8mf4_vl(__rvv_int8mf4_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vx_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vx_i16mf4_vl(__rvv_int16mf4_t arg_0, int16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] 
-// -__rvv_int16mf4_t test_vadd_vx_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, int16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vx_i8mf8_vl(__rvv_int8mf8_t arg_0, int8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_i8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vx_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, int8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vv_u8m1_vl(__rvv_uint8m1_t arg_0, __rvv_uint8m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vv_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, __rvv_uint8m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: 
[[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vv_u16m1_vl(__rvv_uint16m1_t arg_0, __rvv_uint16m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vv_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, __rvv_uint16m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vv_u32m1_vl(__rvv_uint32m1_t arg_0, __rvv_uint32m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vv_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, __rvv_uint32m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vv_u64m1_vl(__rvv_uint64m1_t arg_0, __rvv_uint64m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], 
[[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vv_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, __rvv_uint64m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vv_u8m2_vl(__rvv_uint8m2_t arg_0, __rvv_uint8m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vv_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, __rvv_uint8m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vv_u16m2_vl(__rvv_uint16m2_t arg_0, __rvv_uint16m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vv_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, __rvv_uint16m2_t arg_3, size_t arg_4) -{ - return 
__builtin_rvv_vadd_vv_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vv_u32m2_vl(__rvv_uint32m2_t arg_0, __rvv_uint32m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vv_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, __rvv_uint32m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vv_u64m2_vl(__rvv_uint64m2_t arg_0, __rvv_uint64m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vv_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, __rvv_uint64m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 
[[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vv_u8m4_vl(__rvv_uint8m4_t arg_0, __rvv_uint8m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vv_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, __rvv_uint8m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vv_u16m4_vl(__rvv_uint16m4_t arg_0, __rvv_uint16m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vv_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, __rvv_uint16m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vv_u32m4_vl(__rvv_uint32m4_t arg_0, __rvv_uint32m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// 
CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vv_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, __rvv_uint32m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vv_u64m4_vl(__rvv_uint64m4_t arg_0, __rvv_uint64m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vv_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, __rvv_uint64m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vv_u8m8_vl(__rvv_uint8m8_t arg_0, __rvv_uint8m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vv_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, __rvv_uint8m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: 
@test_vadd_vv_u16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vv_u16m8_vl(__rvv_uint16m8_t arg_0, __rvv_uint16m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vv_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, __rvv_uint16m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vv_u32m8_vl(__rvv_uint32m8_t arg_0, __rvv_uint32m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vv_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, __rvv_uint32m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t 
test_vadd_vv_u64m8_vl(__rvv_uint64m8_t arg_0, __rvv_uint64m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vv_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, __rvv_uint64m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vv_u8mf2_vl(__rvv_uint8mf2_t arg_0, __rvv_uint8mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vv_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, __rvv_uint8mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vv_u16mf2_vl(__rvv_uint16mf2_t arg_0, __rvv_uint16mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// 
CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vv_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, __rvv_uint16mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vv_u32mf2_vl(__rvv_uint32mf2_t arg_0, __rvv_uint32mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vv_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, __rvv_uint32mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vv_u8mf4_vl(__rvv_uint8mf4_t arg_0, __rvv_uint8mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vv_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, __rvv_uint8mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_vl( -// CHECK-RV64-O2-NEXT: 
entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vv_u16mf4_vl(__rvv_uint16mf4_t arg_0, __rvv_uint16mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vv_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, __rvv_uint16mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vv_u8mf8_vl(__rvv_uint8mf8_t arg_0, __rvv_uint8mf8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vv_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, __rvv_uint8mf8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vx_u8m1_vl(__rvv_uint8m1_t arg_0, uint8_t arg_1, size_t arg_2) -{ - 
return __builtin_rvv_vadd_vx_u8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vx_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vx_u16m1_vl(__rvv_uint16m1_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vx_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vx_u32m1_vl(__rvv_uint32m1_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], 
[[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vx_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vx_u64m1_vl(__rvv_uint64m1_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vx_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vx_u8m2_vl(__rvv_uint8m2_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vx_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: 
@test_vadd_vx_u16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vx_u16m2_vl(__rvv_uint16m2_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vx_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vx_u32m2_vl(__rvv_uint32m2_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vx_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vx_u64m2_vl(__rvv_uint64m2_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( 
[[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vx_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vx_u8m4_vl(__rvv_uint8m4_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vx_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vx_u16m4_vl(__rvv_uint16m4_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vx_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return 
__builtin_rvv_vadd_vx_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vx_u32m4_vl(__rvv_uint32m4_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vx_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vx_u64m4_vl(__rvv_uint64m4_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vx_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: 
ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vx_u8m8_vl(__rvv_uint8m8_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vx_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vx_u16m8_vl(__rvv_uint16m8_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vx_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vx_u32m8_vl(__rvv_uint32m8_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// 
CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vx_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vx_u64m8_vl(__rvv_uint64m8_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vx_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vx_u8mf2_vl(__rvv_uint8mf2_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vx_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vx_u16mf2_vl(__rvv_uint16mf2_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vx_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vx_u32mf2_vl(__rvv_uint32mf2_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vx_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vx_u8mf4_vl(__rvv_uint8mf4_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf4_vl(arg_0, arg_1, arg_2); -} - 
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vx_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vx_u16mf4_vl(__rvv_uint16mf4_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vx_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vx_u8mf8_vl(__rvv_uint8mf8_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: 
ret [[TMP0]]
-//
-__rvv_uint8mf8_t test_vadd_vx_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-  return __builtin_rvv_vadd_vx_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
diff --git a/clang/test/Headers/riscv-vector-header.c b/clang/test/Headers/riscv-vector-header.c
new file mode 100644
--- /dev/null
+++ b/clang/test/Headers/riscv-vector-header.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple riscv64 -fsyntax-only \
+// RUN:   -target-feature +m -target-feature +a -target-feature +f \
+// RUN:   -target-feature +d -target-feature +experimental-v %s
+// expected-no-diagnostics
+
+#include <riscv_vector.h>
diff --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt
--- a/clang/utils/TableGen/CMakeLists.txt
+++ b/clang/utils/TableGen/CMakeLists.txt
@@ -18,6 +18,7 @@
   ClangTypeNodesEmitter.cpp
   MveEmitter.cpp
   NeonEmitter.cpp
+  RISCVVEmitter.cpp
   SveEmitter.cpp
   TableGen.cpp
   )
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
new file mode 100644
--- /dev/null
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -0,0 +1,1112 @@
+//===- RISCVVEmitter.cpp - Generate riscv_vector.h for use with clang -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting riscv_vector.h and
+// riscv_vector_generic.h, which include a declaration and definition of each
+// intrinsic function specified in https://github.com/riscv/rvv-intrinsic-doc.
+//
+// See also the documentation in include/clang/Basic/riscv_vector.td.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/TableGen/Record.h"
+
+#include <numeric>
+
+using namespace llvm;
+using BasicType = char;
+using VScaleVal = Optional<unsigned>;
+
+namespace {
+
+// Exponential LMUL
+class ELMULType {
+private:
+  int ELMUL;
+
+public:
+  ELMULType() : ELMULType(0) {}
+  ELMULType(int ELMUL);
+  // Return the C/C++ string representation of LMUL
+  std::string str() const;
+  Optional<unsigned> getVscale(unsigned ElementBitwidth) const;
+  ELMULType &operator*=(unsigned RHS);
+};
+
+// This class is a compact representation of a valid or invalid RVVType.
+class RVVType {
+  BasicType BT;
+  ELMULType LMUL;
+  bool Float, Bool, Signed;
+  // Constant indices are "int", but have the constant expression.
+  bool Immediate;
+  bool Void;
+  // const qualifier.
+  bool Constant;
+  bool Pointer;
+  bool SIZE_T, PtrDiff_T;
+  unsigned ElementBitwidth;
+  VScaleVal Vscale;
+  bool Valid;
+
+  std::string BuiltinStr;
+  std::string ClangBuiltinStr;
+  std::string Str;
+  std::string ShortStr;
+
+public:
+  RVVType() : RVVType(BasicType(), 0, StringRef()) {}
+  RVVType(BasicType BT, int ELMUL, StringRef prototype);
+
+  // Return the string representation of a type, which is an encoded string for
+  // passing to the BUILTIN() macro in Builtins.def.
+  const std::string &builtin_str() const { return BuiltinStr; }
+
+  // Return the clang builtin type for an RVV vector type, which is used in the
+  // riscv_vector.h header file.
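+  // For example, a signed 32-bit element vector with LMUL=1 maps to
+  // __rvv_int32m1_t (illustrative; cf. the __rvv_* types in the tests removed
+  // above).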
+  const std::string &clang_builtin_str() const { return ClangBuiltinStr; }
+
+  // Return the C/C++ string representation of a type for use in the
+  // riscv_vector.h header file.
+  const std::string &type_str() const { return Str; }
+
+  // Return the short name of a type for use as a C/C++ name suffix.
+  const std::string &short_str() const { return ShortStr; }
+
+  bool isValid() const { return Valid; }
+  bool isScalar() const { return Vscale.hasValue() && Vscale.getValue() == 0; }
+  bool isVector() const { return Vscale.hasValue() && Vscale.getValue() != 0; }
+  bool isHalfVector() const {
+    return isVector() && Float && ElementBitwidth == 16;
+  }
+  bool isFloatVector() const {
+    return isVector() && Float && ElementBitwidth == 32;
+  }
+  bool isDoubleVector() const {
+    return isVector() && Float && ElementBitwidth == 64;
+  }
+
+private:
+  // Verify the RVV vector type and set Valid.
+  bool verifyType() const;
+
+  // Create a type based on the basic type of the TypeRange.
+  void applyBasicType();
+
+  // Apply a prototype modifier to the current type. The result may be an
+  // invalid type.
+  void applyModifier(StringRef prototype);
+
+  // Compute and record the builtin string for a legal type.
+  void compute_builtin_str();
+  // Compute and record the builtin RVV vector type string.
+  void compute_clang_builtin_str();
+  // Compute and record the type string for use in the header.
+  void compute_type_str();
+  // Compute and record the short name of a type for a C/C++ name suffix.
+  void compute_short_str();
+};
+
+using RVVTypePtr = RVVType *;
+using RVVTypes = std::vector<RVVTypePtr>;
+
+enum class RISCV_Extension : uint8_t {
+  Basic = 0,
+  F = 1 << 1,
+  D = 1 << 2,
+  ZFH = 1 << 3
+};
+
+// TODO: refactor the RVVIntrinsic class design after supporting all intrinsic
+// combinations. This represents an instantiation of an intrinsic with a
+// particular type and prototype.
+class RVVIntrinsic {
+
+private:
+  std::string Name; // Builtin name
+  std::string MangledName;
+  std::string IRName;
+  bool HasSideEffects;
+  bool IsMask;
+  bool HasMergeOperand;
+  bool HasVL;
+  bool HasGeneric;
+  RVVTypes Types; // Includes both the output and input types
+  std::vector<int64_t> IntrinsicTypes; // Operand indices used to build the
+                                       // LLVM IR intrinsic type suffix
+  uint8_t RISCV_Extensions = 0;
+
+public:
+  RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledSuffix,
+               StringRef IRName, bool HasSideEffects, bool IsMask,
+               bool HasMergeOperand, bool HasVL, bool HasGeneric,
+               const RVVTypes &Types,
+               const std::vector<int64_t> &RVVIntrinsicTypes);
+  ~RVVIntrinsic() = default;
+
+  StringRef getName() const { return Name; }
+  StringRef getMangledName() const { return MangledName; }
+  bool hasSideEffects() const { return HasSideEffects; }
+  bool hasMergeOperand() const { return HasMergeOperand; }
+  bool hasVL() const { return HasVL; }
+  bool hasGeneric() const { return HasGeneric; }
+  size_t getNumOperand() const { return Types.size() - 1; }
+  // Get output and input types
+  ArrayRef<RVVTypePtr> getTypes() const { return Types; }
+  ArrayRef<int64_t> getIntrinsicTypes() const { return IntrinsicTypes; }
+  std::string getIRName() const { return IRName; }
+  uint8_t getRISCV_Extensions() const {
+    return static_cast<uint8_t>(RISCV_Extensions);
+  }
+
+  // Return the type string for a BUILTIN() macro in Builtins.def.
+  std::string getBuiltinTypeStr() const;
+
+  // Emit the code block for the switch body in EmitRISCVBuiltinExpr; it should
+  // initialize the intrinsic ID and IntrinsicTypes.
+  void emitCodeGenSwitchBody(raw_ostream &o) const;
+
+  // Return the function declaration with the given function name. Arguments
+  // are named arg_1, arg_2, ...
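+  // Illustrative output for a hypothetical vadd_vv_i32m1_vl instance:
+  //   vint32m1_t vadd_vv_i32m1_vl(vint32m1_t arg_1, vint32m1_t arg_2,
+  //                               size_t arg_3)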
+  std::string getFuncDecl(Twine FuncName) const;
+
+  /// Return the function definition for the Clang header and tests.
+  std::string createFunction(Twine FuncName, Twine CalleeName) const;
+};
+
+using TypeString = std::string;
+class RVVEmitter {
+private:
+  RecordKeeper &Records;
+  // Concat BasicType, LMUL and Proto as key
+  StringMap<RVVType> LegalTypes;
+  StringSet<> IllegalTypes;
+
+public:
+  RVVEmitter(RecordKeeper &R) : Records(R) {}
+
+  /// Emit riscv_vector.h
+  void createHeader(raw_ostream &o);
+
+  /// Emit riscv_vector_generic.h
+  void createGenericHeader(raw_ostream &o);
+
+  /// Emit all the __builtin prototypes and code needed by Sema.
+  void createBuiltins(raw_ostream &o);
+
+  /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+  void createCodeGen(raw_ostream &o);
+
+  /// Emit RISCV Vector tests for Clang
+  void createTest(raw_ostream &o, bool IsGeneric);
+
+private:
+  /// Create all intrinsics and add them to \p Out
+  void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+  /// Compute output and input types by applying different configs (basic type
+  /// and LMUL with type transformers). It also records the result in the legal
+  /// or illegal set to avoid computing the same config again. The result may
+  /// contain an illegal RVVType.
+  Optional<RVVTypes> computeTypes(BasicType BT, int ELMUL,
+                                  ArrayRef<std::string> PrototypeSeq);
+  Optional<RVVTypePtr> computeType(BasicType BT, int ELMUL, StringRef Proto);
+
+  /// Emit the architecture preprocessor definitions and the body.
+  void emitArchMacroAndBody(
+      std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o,
+      std::function<void(raw_ostream &, const RVVIntrinsic &)>);
+
+  // Return the architecture preprocessor definitions.
+  SmallVector<std::string, 8> getExtStrings(uint8_t Extensions);
+};
+
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+ELMULType::ELMULType(int NewELMUL) {
+  // Check ELMUL is -3, -2, -1, 0, 1, 2, 3
+  assert(NewELMUL <= 3 && NewELMUL >= -3 && "Bad LMUL number!");
+  ELMUL = NewELMUL;
+}
+
+std::string ELMULType::str() const {
+  if (ELMUL < 0)
+    return "mf" + utostr(1 << (-ELMUL));
+  return "m" + utostr(1 << ELMUL);
+}
+
+VScaleVal ELMULType::getVscale(unsigned ElementBitwidth) const {
+  int ExpResult = 0;
+  switch (ElementBitwidth) {
+  default:
+    break;
+  case 8:
+    ExpResult = ELMUL + 3;
+    break;
+  case 16:
+    ExpResult = ELMUL + 2;
+    break;
+  case 32:
+    ExpResult = ELMUL + 1;
+    break;
+  case 64:
+    ExpResult = ELMUL;
+    break;
+  }
+  // An illegal vscale result would be less than 1.
+  if (ExpResult < 0)
+    return None;
+  return Optional<unsigned>(1 << ExpResult);
+}
+
+ELMULType &ELMULType::operator*=(unsigned RHS) {
+  this->ELMUL = this->ELMUL + RHS;
+  return *this;
+}
+
+RVVType::RVVType(BasicType BT, int ELMUL, StringRef prototype)
+    : BT(BT), LMUL(ELMULType(ELMUL)), Float(false), Bool(false), Signed(true),
+      Immediate(false), Void(false), Constant(false), Pointer(false),
+      SIZE_T(false), PtrDiff_T(false), ElementBitwidth(~0U), Vscale(0) {
+  applyBasicType();
+  applyModifier(prototype);
+  Valid = verifyType();
+  if (Valid) {
+    compute_builtin_str();
+    compute_type_str();
+    if (isVector()) {
+      compute_clang_builtin_str();
+      compute_short_str();
+    }
+  }
+}
+
+// Legal RVV vector type combinations:
+// bool | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1
+
+// type\lmul |1/8     |1/4     |1/2     |1       |2       |4        |8
+// --------- |------  |--------|------- |------- |--------|-------- |--------
+// i64       |N/A     |N/A     |N/A     |nxv1i64 |nxv2i64 |nxv4i64  |nxv8i64
+// i32       |N/A     |N/A     |nxv1i32 |nxv2i32 |nxv4i32 |nxv8i32  |nxv16i32
+// i16       |N/A     |nxv1i16 |nxv2i16 |nxv4i16 |nxv8i16 |nxv16i16 |nxv32i16
+// i8        |nxv1i8  |nxv2i8  |nxv4i8  |nxv8i8  |nxv16i8 |nxv32i8  |nxv64i8
+// double    |N/A     |N/A     |N/A     |nxv1f64 |nxv2f64 |nxv4f64  |nxv8f64
+// float     |N/A     |N/A     |nxv1f32 |nxv2f32 |nxv4f32 |nxv8f32  |nxv16f32
+// half      |N/A     |nxv1f16 |nxv2f16 |nxv4f16 |nxv8f16 |nxv16f16 |nxv32f16
+bool RVVType::verifyType() const {
+  if (isScalar())
+    return true;
+  if (!Vscale.hasValue())
+    return false;
+  if (Float && ElementBitwidth == 8)
+    return false;
+  unsigned V = Vscale.getValue();
+  switch (ElementBitwidth) {
+  case 1:
+  case 8:
+    // Check Vscale is 1,2,4,8,16,32,64
+    return (V <= 64 && countPopulation(V) == 1);
+  case 16:
+    // Check Vscale is 1,2,4,8,16,32
+    return (V <= 32 && countPopulation(V) == 1);
+  case 32:
+    // Check Vscale is 1,2,4,8,16
+    return (V <= 16 && countPopulation(V) == 1);
+  case 64:
+    // Check Vscale is 1,2,4,8
+    return (V <= 8 && countPopulation(V) == 1);
+  }
+  return false;
+}
+
+void RVVType::compute_builtin_str() {
+  assert(isValid() && "RVVType is invalid");
+  std::string &S = BuiltinStr;
+  if (Void) {
+    S = "v";
+    return;
+  } else if (SIZE_T) {
+    S = "z";
+    if (Immediate)
+      S = "I" + S;
+    return;
+  } else if (PtrDiff_T) {
+    S = "Y";
+    return;
+  }
+
+  if (!Float) {
+    switch (ElementBitwidth) {
+    case 1:
+      S += "b";
+      break;
+    case 8:
+      S += "c";
+      break;
+    case 16:
+      S += "s";
+      break;
+    case 32:
+      S += "i";
+      break;
+    case 64:
+      S += "Wi";
+      break;
+    default:
+      llvm_unreachable("Unhandled case!");
+    }
+  } else {
+    switch (ElementBitwidth) {
+    case 16:
+      S += "h";
+      break;
+    case 32:
+      S += "f";
+      break;
+    case 64:
+      S += "d";
+      break;
+    default:
+      llvm_unreachable("Unhandled case!");
+    }
+  }
+  if (!Float && !Bool) {
+    if (Signed)
+      S = "S" + S;
+    else
+      S = "U" + S;
+  }
+  if (Immediate) {
+    assert(!Float && "fp immediates are not supported");
+    S = "I" + S;
+  }
+  if (isScalar()) {
+    if (Constant)
+      S += "C";
+    if (Pointer)
+      S += "*";
+    return;
+  }
+  S = "q" + utostr(Vscale.getValue()) + S;
+}
+
+void RVVType::compute_clang_builtin_str() {
+  assert(isValid() && "RVVType is invalid");
+  assert(isVector() && "Handle Vector type only");
+
+  std::string &S = ClangBuiltinStr;
+  S += "__rvv_";
+  if (Bool)
+    S += "bool";
+  else if (Float)
+    S += "float";
+  else if (Signed)
+    S += "int";
+  else
+    S += "uint";
+  if (Bool)
+    S += utostr(64 / Vscale.getValue());
+  else
+    S += utostr(ElementBitwidth);
+  if (!Bool) {
+    S += LMUL.str();
+  }
+  S += "_t";
+}
+
+void RVVType::compute_type_str() {
+  assert(isValid() && "RVVType is invalid");
+  std::string &S = Str;
+  if (Void) {
+    S = "void";
+    return;
+  } else if (SIZE_T) {
+    S = "size_t";
+    return;
+  } else if (PtrDiff_T) {
+    S = "ptrdiff_t";
+    return;
+  }
+  if (Constant)
+    S += "const ";
+  if (isVector())
+    S += "v";
+  if (Bool)
+    S += "bool";
+  else if (Float)
+    S += "float";
+  else if (Signed)
+    S += "int";
+  else
+    S += "uint";
+  // The vector bool type is a special case; the formula is
+  // `vbool<N>_t = MVT::nxv<64/N>i1`, e.g. vbool16_t = MVT::nxv4i1.
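+  // Illustrative results of this routine: "vint32m1_t", "vbool16_t", or
+  // "const int32_t *" for a const scalar pointer (hypothetical examples).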
+  if (Bool && isVector())
+    S += utostr(64 / Vscale.getValue());
+  else
+    S += utostr(ElementBitwidth);
+  // Non-bool RVV vector types have an LMUL suffix.
+  if (isVector() && !Bool) {
+    S += LMUL.str();
+  }
+  S += "_t";
+  if (Pointer)
+    S += " *";
+}
+
+void RVVType::compute_short_str() {
+  assert(isVector() && "only handle vector type");
+  if (Bool) {
+    ShortStr = "b" + utostr(64 / Vscale.getValue());
+    return;
+  }
+  std::string &S = ShortStr;
+  if (Float)
+    S = "f";
+  else if (Signed)
+    S = "i";
+  else
+    S = "u";
+  S += utostr(ElementBitwidth) + LMUL.str();
+}
+
+void RVVType::applyBasicType() {
+  switch (BT) {
+  case 'c':
+    ElementBitwidth = 8;
+    break;
+  case 's':
+    ElementBitwidth = 16;
+    break;
+  case 'i':
+    ElementBitwidth = 32;
+    break;
+  case 'l':
+    ElementBitwidth = 64;
+    break;
+  case 'h':
+    ElementBitwidth = 16;
+    Float = true;
+    break;
+  case 'f':
+    ElementBitwidth = 32;
+    Float = true;
+    break;
+  case 'd':
+    ElementBitwidth = 64;
+    Float = true;
+    break;
+  default:
+    llvm_unreachable("Unhandled type code!");
+  }
+  assert(ElementBitwidth != ~0U && "Bad element bitwidth!");
+}
+
+void RVVType::applyModifier(StringRef transformer) {
+  if (transformer.empty())
+    return;
+  // Handle the primitive type transformer.
+  switch (transformer.back()) {
+  case 'e':
+    Vscale = 0;
+    break;
+  case 'v':
+    Vscale = LMUL.getVscale(ElementBitwidth);
+    break;
+  case 'w':
+    ElementBitwidth *= 2;
+    LMUL *= 2;
+    Vscale = LMUL.getVscale(ElementBitwidth);
+    break;
+  case 'q':
+    ElementBitwidth *= 4;
+    LMUL *= 4;
+    Vscale = LMUL.getVscale(ElementBitwidth);
+    break;
+  case 'o':
+    ElementBitwidth *= 8;
+    LMUL *= 8;
+    Vscale = LMUL.getVscale(ElementBitwidth);
+    break;
+  case 'm':
+    Bool = true;
+    Float = false;
+    Vscale = LMUL.getVscale(ElementBitwidth);
+    ElementBitwidth = 1;
+    break;
+  case '0':
+    Void = true;
+    break;
+  case 'z':
+    SIZE_T = true;
+    break;
+  case 't':
+    PtrDiff_T = true;
+    break;
+  case 'c': // uint8_t
+    Signed = false;
+    ElementBitwidth = 8;
+    Vscale = 0;
+    break;
+  default:
+    llvm_unreachable("Illegal primitive type transformer!");
+  }
+
+  // Handle the non-primitive type transformers.
+  for (char I : transformer.take_front(transformer.size() - 1)) {
+    switch (I) {
+    case 'P':
+      Pointer = true;
+      break;
+    case 'C':
+      Constant = true;
+      break;
+    case 'K':
+      Immediate = true;
+      break;
+    case 'U':
+      Signed = false;
+      break;
+    case 'I':
+      Float = false;
+      break;
+    case 'F':
+      Float = true;
+      break;
+    case 'W':
+      assert(isVector() && "'W' type transformer can only be used on vectors");
+      ElementBitwidth *= 2;
+      break;
+    case 'S':
+      LMUL = ELMULType(0);
+      break;
+    default:
+      llvm_unreachable("Illegal non-primitive type transformer!");
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// RVVIntrinsic implementation
+//===----------------------------------------------------------------------===//
+RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
+                           StringRef MangledSuffix, StringRef IRName,
+                           bool HasSideEffects, bool IsMask,
+                           bool HasMergeOperand, bool HasVL, bool HasGeneric,
+                           const RVVTypes &Types,
+                           const std::vector<int64_t> &IntrinsicTypes)
+    : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
+      HasMergeOperand(HasMergeOperand), HasVL(HasVL), HasGeneric(HasGeneric),
+      Types(Types), IntrinsicTypes(IntrinsicTypes) {
+
+  // Init Name and MangledName
+  Name = NewName.str();
+  MangledName = Twine(NewName.split("_").first).str();
+  if (Suffix.size())
+    Name += "_" + Suffix.str();
+  if (MangledSuffix.size())
+    MangledName += "_" + MangledSuffix.str();
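+  // Example (illustrative): NewName "vadd_vv" with Suffix "u64m8" plus the
+  // IsMask/HasVL adjustments below yields the builtin name
+  // "vadd_vv_u64m8_m_vl" (cf. the tests above), while the overloaded
+  // MangledName becomes "vadd_m_vl".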
+  if (IsMask) {
+    Name += "_m";
+    MangledName += "_m";
+  }
+  if (HasVL) {
+    Name += "_vl";
+    MangledName += "_vl";
+  }
+  // Init RISCV_Extensions
+  for (const auto &T : Types) {
+    if (T->isHalfVector()) {
+      RISCV_Extensions |= static_cast<uint8_t>(RISCV_Extension::ZFH);
+    } else if (T->isFloatVector()) {
+      RISCV_Extensions |= static_cast<uint8_t>(RISCV_Extension::F);
+    } else if (T->isDoubleVector()) {
+      RISCV_Extensions |= static_cast<uint8_t>(RISCV_Extension::D);
+    }
+  }
+}
+
+std::string RVVIntrinsic::getBuiltinTypeStr() const {
+  std::string S;
+  for (const auto &T : Types) {
+    S += T->builtin_str();
+  }
+  return S;
+}
+
+void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
+
+  auto getIntrinsicTypesString =
+      [this](const std::vector<int64_t> &IntrinsicTypes, raw_ostream &OS) {
+        OS << "  ID = Intrinsic::riscv_" + getIRName() + ";\n";
+
+        OS << "  IntrinsicTypes = {";
+        for (const auto &Idx : IntrinsicTypes) {
+          if (Idx == -1)
+            OS << "ResultType";
+          else
+            OS << ", Ops[" + utostr(static_cast<size_t>(Idx)) +
+                      "]->getType()";
+        }
+
+        // VL could be i64 or i32, so we need to encode it in IntrinsicTypes.
+        // VL is always the last operand.
+        if (hasVL())
+          OS << ", Ops[" + utostr(getNumOperand() - 1) + "]->getType()";
+        OS << "};\n";
+      };
+
+  if (!IsMask) {
+    getIntrinsicTypesString(getIntrinsicTypes(), OS);
+    return;
+  }
+  // IntrinsicTypes holds the indices of the non-masked version; we need to
+  // update them. (They do not count the additional mask and merge operands.)
+  signed Skew = 1;
+  if (hasMergeOperand())
+    Skew = 2;
+  std::vector<int64_t> NewIntrinsicTypes = getIntrinsicTypes();
+  for (auto &I : NewIntrinsicTypes) {
+    if (I >= 0)
+      I += Skew;
+  }
+  getIntrinsicTypesString(NewIntrinsicTypes, OS);
+
+  // The order of builtin operands is (mask, maskedoff, op1, op..., vl) and the
+  // order of intrinsic operands is (maskedoff, op1, op..., mask, vl) for a
+  // masked operation with both mask and maskedoff, or
+  // the order of builtin operands is (mask, op1, op..., vl) and the order of
+  // intrinsic operands is (op1, op..., mask, vl) for a masked operation with a
+  // mask only.
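+  // For example (cf. the tests above), the builtin call
+  //   __builtin_rvv_vadd_vv_u64m8_m_vl(mask, maskedoff, op1, op2, vl)
+  // lowers to
+  //   @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(maskedoff, op1, op2, mask, vl),
+  // so the mask is rotated from the front to just before vl below.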
+ OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n"; +} + +std::string RVVIntrinsic::getFuncDelc(Twine FuncName) const { + // Index 0 is output type + std::string S = + Twine(Types[0]->type_str() + Twine(" ") + FuncName + Twine("(")).str(); + + auto concat = [&](std::string a, const RVVTypePtr &b) { + size_t idx = &b - &Types[0]; + return std::move(a) + ", " + b->type_str() + " arg_" + utostr(idx); + }; + // Append function arguments string + if (Types.size() > 1) { + std::string Arguments = + std::accumulate(Types.begin() + 2, Types.end(), + Types[1]->type_str() + " arg_1", concat); + S += Arguments; + } + S += ")"; + return S; +} + +std::string RVVIntrinsic::createFunction(Twine FuncName, + Twine CalleeName) const { + std::string S(getFuncDelc(FuncName)); + S += " {\n"; + + S += Twine(" return " + CalleeName + "(").str(); + // Append parameter variables + if (Types.size() > 1) { + S += "arg_1"; + for (unsigned i = 2; i < Types.size(); ++i) + S += ", arg_" + utostr(i); + } + S += ");\n"; + + S += "}\n\n"; + return S; +} + +//===----------------------------------------------------------------------===// +// RVVEmitter implementation +//===----------------------------------------------------------------------===// +void RVVEmitter::createHeader(raw_ostream &OS) { + + OS << "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics " + "-------------------===\n" + " *\n" + " *\n" + " * Part of the LLVM Project, under the Apache License v2.0 with LLVM " + "Exceptions.\n" + " * See https://llvm.org/LICENSE.txt for license information.\n" + " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" + " *\n" + " *===-----------------------------------------------------------------" + "------===\n" + " */\n\n"; + + OS << "#ifndef _RISCV_VECTOR_H\n"; + OS << "#define _RISCV_VECTOR_H\n\n"; + + OS << "#include \n"; + OS << "#include \n\n"; + + OS << "#ifndef __riscv_vector\n"; + OS << "#error \"Vector intrinsics require the vector extension.\"\n"; + OS << "#else\n\n"; + + // TODO: Upadte psABI to define float16_t type. + // OS << "typedef __fp16 float16_t;\n"; + OS << "typedef float float32_t;\n"; + OS << "typedef double float64_t;\n\n"; + + OS << "#ifdef __cplusplus\n"; + OS << "extern \"C\" {\n"; + OS << "#endif\n\n"; + + std::vector> Defs; + createRVVIntrinsics(Defs); + + // Dump RVV boolean types. + auto dumpType = [&](auto T) { + OS << "typedef " << T->clang_builtin_str() << " " << T->type_str() << ";\n"; + }; + + constexpr int ELMULs[] = {-3, -2, -1, 0, 1, 2, 3}; + for (int ELMUL : ELMULs) { + auto T = computeType('c', ELMUL, "m"); + if (T.hasValue()) + dumpType(T.getValue()); + } + // Dump RVV int/float types. + for (char I : StringRef("csil")) + for (int ELMUL : ELMULs) { + auto T = computeType(I, ELMUL, "v"); + if (T.hasValue()) { + dumpType(T.getValue()); + auto UT = computeType(I, ELMUL, "Uv"); + dumpType(UT.getValue()); + } + } + // Dump RVV float types. + OS << "#if defined(__riscv_zfh)\n"; + for (int ELMUL : ELMULs) { + auto T = computeType('h', ELMUL, "v"); + // first. 
+
+//===----------------------------------------------------------------------===//
+// RVVEmitter implementation
+//===----------------------------------------------------------------------===//
+void RVVEmitter::createHeader(raw_ostream &OS) {
+
+  OS << "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics "
+        "-------------------===\n"
+        " *\n"
+        " *\n"
+        " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+        "Exceptions.\n"
+        " * See https://llvm.org/LICENSE.txt for license information.\n"
+        " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+        " *\n"
+        " *===-----------------------------------------------------------------"
+        "------===\n"
+        " */\n\n";
+
+  OS << "#ifndef _RISCV_VECTOR_H\n";
+  OS << "#define _RISCV_VECTOR_H\n\n";
+
+  OS << "#include <stdint.h>\n";
+  OS << "#include <stddef.h>\n\n";
+
+  OS << "#ifndef __riscv_vector\n";
+  OS << "#error \"Vector intrinsics require the vector extension.\"\n";
+  OS << "#else\n\n";
+
+  // TODO: Update psABI to define a float16_t type.
+  // OS << "typedef __fp16 float16_t;\n";
+  OS << "typedef float float32_t;\n";
+  OS << "typedef double float64_t;\n\n";
+
+  OS << "#ifdef __cplusplus\n";
+  OS << "extern \"C\" {\n";
+  OS << "#endif\n\n";
+
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  auto dumpType = [&](auto T) {
+    OS << "typedef " << T->clang_builtin_str() << " " << T->type_str()
+       << ";\n";
+  };
+
+  // Dump RVV boolean types.
+  constexpr int ELMULs[] = {-3, -2, -1, 0, 1, 2, 3};
+  for (int ELMUL : ELMULs) {
+    auto T = computeType('c', ELMUL, "m");
+    if (T.hasValue())
+      dumpType(T.getValue());
+  }
+  // Dump RVV integer types, signed and unsigned.
+  for (char I : StringRef("csil"))
+    for (int ELMUL : ELMULs) {
+      auto T = computeType(I, ELMUL, "v");
+      if (T.hasValue()) {
+        dumpType(T.getValue());
+        auto UT = computeType(I, ELMUL, "Uv");
+        dumpType(UT.getValue());
+      }
+    }
+  // Dump RVV floating-point types.
+  OS << "#if defined(__riscv_zfh)\n";
+  for (int ELMUL : ELMULs) {
+    auto T = computeType('h', ELMUL, "v");
+    if (T.hasValue())
+      dumpType(T.getValue());
+  }
+  OS << "#endif\n";
+
+  // D implies F.
+  OS << "#if defined(__riscv_f) || defined(__riscv_d)\n";
+  for (int ELMUL : ELMULs) {
+    auto T = computeType('f', ELMUL, "v");
+    if (T.hasValue())
+      dumpType(T.getValue());
+  }
+  OS << "#endif\n";
+
+  OS << "#if defined(__riscv_d)\n";
+  for (int ELMUL : ELMULs) {
+    auto T = computeType('d', ELMUL, "v");
+    if (T.hasValue())
+      dumpType(T.getValue());
+  }
+  OS << "#endif\n\n";
+
+  // Dump the intrinsic functions as macros.
+  emitArchMacroAndBody(
+      Defs, OS, [this](raw_ostream &OS, const RVVIntrinsic &Inst) {
+        OS << "// " << Inst.getFuncDecl(Inst.getName()) << "\n";
+        OS << "#define " << Inst.getName() << "(...) __builtin_rvv_"
+           << Inst.getName() << "(__VA_ARGS__)\n";
+      });
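+  // For a vadd_vv builtin at SEW=32/LMUL=1 the lambda above emits roughly:
+  //   // vint32m1_t vadd_vv_i32m1_vl(vint32m1_t arg_1, vint32m1_t arg_2, size_t arg_3)
+  //   #define vadd_vv_i32m1_vl(...) __builtin_rvv_vadd_vv_i32m1_vl(__VA_ARGS__)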
+
+  OS << "\n#ifdef __cplusplus\n";
+  OS << "}\n";
+  OS << "#endif\n";
+  OS << "#endif // __riscv_vector\n";
+  OS << "#endif // _RISCV_VECTOR_H\n";
+}
+
+void RVVEmitter::createGenericHeader(raw_ostream &OS) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  OS << "#include <riscv_vector.h>\n\n";
+  // Dump the overloaded intrinsic functions.
+  emitArchMacroAndBody(
+      Defs, OS, [this](raw_ostream &OS, const RVVIntrinsic &Inst) {
+        if (!Inst.hasGeneric())
+          return;
+        OS << StringRef(
+            "static inline __attribute__((__always_inline__, __nodebug__, "
+            "__overloadable__))\n");
+        OS << Inst.createFunction(Inst.getMangledName(), Inst.getName());
+      });
+}
+
+void RVVEmitter::createBuiltins(raw_ostream &OS) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  OS << "#if defined(BUILTIN) && !defined(RISCVV_BUILTIN)\n";
+  OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)\n";
+  OS << "#endif\n";
+  for (auto &Def : Defs) {
+    OS << "RISCVV_BUILTIN(" << Def->getName() << ",\""
+       << Def->getBuiltinTypeStr() << "\", ";
+    if (!Def->hasSideEffects())
+      OS << "\"n\")\n";
+    else
+      OS << "\"\")\n";
+  }
+  OS << "\n#undef BUILTIN\n";
+  OS << "#undef RISCVV_BUILTIN\n";
+}
+
+void RVVEmitter::createCodeGen(raw_ostream &OS) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  // Intrinsics with the same IR name share the same switch body.
+  llvm::StringMap<SmallVector<std::unique_ptr<RVVIntrinsic>, 128>> DefsSet;
+  for (auto &Def : Defs) {
+    DefsSet[Def->getIRName()].push_back(std::move(Def));
+  }
+  for (const auto &KV : DefsSet) {
+    for (const auto &I : KV.getValue()) {
+      OS << "case RISCV::BI" << I->getName() << ":\n";
+    }
+    KV.getValue()[0]->emitCodeGenSwitchBody(OS);
+    OS << "\n  break;\n";
+  }
+}
+
+void RVVEmitter::createTest(raw_ostream &OS, bool IsGeneric) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  std::string S;
+  for (auto &Def : Defs) {
+    // Some intrinsics have no generic variant.
+    if (!Def->hasGeneric() && IsGeneric)
+      continue;
+    StringRef Name(IsGeneric ? Def->getMangledName() : Def->getName());
+    S += Def->createFunction(Twine("test_" + Def->getName()), Name);
+  }
+  if (S.empty())
+    return;
+  OS << "// RUN: %clang_cc1 -triple riscv32 -target-feature +f "
+        "-target-feature +d -target-feature +experimental-v \\\n";
+  OS << "// RUN:   -target-feature +experimental-zfh -disable-O0-optnone "
+        "-emit-llvm %s -o - | opt -S -mem2reg | FileCheck "
+        "--check-prefix=CHECK-RV32 %s\n";
+  OS << "// RUN: %clang_cc1 -triple riscv64 -target-feature +f "
+        "-target-feature +d -target-feature +experimental-v \\\n";
+  OS << "// RUN:   -target-feature +experimental-zfh -disable-O0-optnone "
+        "-emit-llvm %s -o - | opt -S -mem2reg | FileCheck "
+        "--check-prefix=CHECK-RV64 %s\n";
+  OS << "// RUN: %clang_cc1 -triple riscv64 -target-feature +f "
+        "-target-feature +d -target-feature +experimental-v \\\n";
+  OS << "// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s "
+        ">/dev/null 2>%t\n";
+  OS << "// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t\n\n";
+
+  OS << "// NOTE: This file is autogenerated by 'bin/clang-tblgen "
+        "-gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td "
+        "-D=<macro>'\n\n";
+  OS << "// ASM-NOT: warning\n";
+  if (IsGeneric)
+    OS << "#include <riscv_vector_generic.h>\n\n";
+  else
+    OS << "#include <riscv_vector.h>\n\n";
+  OS << S;
+}
+
+void RVVEmitter::createRVVIntrinsics(
+    std::vector<std::unique_ptr<RVVIntrinsic>> &Out) {
+
+  std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin");
+  for (auto *R : RV) {
+    StringRef Name = R->getValueAsString("Name");
+    StringRef Suffix = R->getValueAsString("Suffix");
+    StringRef MangledSuffix = R->getValueAsString("MangledSuffix");
+    StringRef Prototypes = R->getValueAsString("Prototype");
+    StringRef TypeRange = R->getValueAsString("TypeRange");
+    bool HasMask = R->getValueAsBit("HasMask");
+    bool HasMergeOperand = R->getValueAsBit("HasMergeOperand");
+    bool HasVL = R->getValueAsBit("HasVL");
+    bool HasGeneric = R->getValueAsBit("HasGeneric");
+    bool HasSideEffects = R->getValueAsBit("HasSideEffects");
+    std::vector<int64_t> ELMULList = R->getValueAsListOfInts("ELMUL");
+    std::vector<int64_t> IntrinsicTypes =
+        R->getValueAsListOfInts("IntrinsicTypes");
+    StringRef IRName = R->getValueAsString("IRName");
+    StringRef IRNameMask = R->getValueAsString("IRNameMask");
+
+    // Parse the prototype into a list of operands in ProtoSeq, each a
+    // primitive type with optional prefix transformers. ProtoSeq[0] is the
+    // output operand.
+    SmallVector<std::string, 8> ProtoSeq;
+    const StringRef Primaries("evwqom0ztc");
+    size_t start = 0;
+    for (size_t i = 0; i < Prototypes.size(); ++i) {
+      if (Primaries.find(Prototypes[i]) != StringRef::npos) {
+        ProtoSeq.push_back(Prototypes.substr(start, i - start + 1).str());
+        start = i + 1;
+      }
+    }
+    // If HasVL, append 'z' (size_t) as the last operand.
+    if (HasVL)
+      ProtoSeq.push_back("z");
+
+    SmallVector<std::string, 8> ProtoMaskSeq = ProtoSeq;
+    if (HasMask) {
+      // If HasMask, insert 'm' as the first input operand.
+      ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
+      // If HasMergeOperand, insert the result type as the second input
+      // operand.
+      if (HasMergeOperand)
+        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 2, ProtoSeq[0]);
+    }
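+    // For example, the prototype "vvUv" splits into {"v", "v", "Uv"}; with
+    // HasVL it becomes {"v", "v", "Uv", "z"}, and with HasMask plus
+    // HasMergeOperand the masked sequence is {"v", "m", "v", "v", "Uv", "z"}.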
"" + : computeType(I, ELMUL, MangledSuffix).getValue()->short_str(); + // Create a non-mask intrinsic. + Out.push_back(std::make_unique( + Name, SuffixStr, MSuffixStr, IRName, HasSideEffects, + /*IsMask=*/false, /*HasMergeOperand=*/false, HasVL, HasGeneric, + Types.getValue(), IntrinsicTypes)); + if (HasMask) { + // Create a mask intrinsic + Optional MaskTypes = computeTypes(I, ELMUL, ProtoMaskSeq); + Out.push_back(std::make_unique( + Name, SuffixStr, MSuffixStr, IRNameMask, HasSideEffects, + /*IsMask=*/true, HasMergeOperand, HasVL, HasGeneric, + MaskTypes.getValue(), IntrinsicTypes)); + } + } // end for ELMUL + } // end for TypeRange + } +} + +Optional +RVVEmitter::computeTypes(BasicType BT, int ELMUL, + ArrayRef PrototypeSeq) { + RVVTypes Types; + for (const std::string &Proto : PrototypeSeq) { + auto T = computeType(BT, ELMUL, Proto); + if (!T.hasValue()) + return llvm::None; + // Record legal type index + Types.push_back(T.getValue()); + } + return Optional(Types); +} + +Optional RVVEmitter::computeType(BasicType BT, int ELMUL, + StringRef Proto) { + TypeString Idx = Twine(Twine(BT) + Twine(ELMUL) + Proto).str(); + // Search first + auto It = LegalTypes.find(Idx); + if (It != LegalTypes.end()) + return Optional(&(It->second)); + if (IllegalTypes.count(Idx)) + return llvm::None; + // Compute type and record the result. + RVVType T(BT, ELMUL, Proto); + if (T.isValid()) { + // Record legal type index and value. + LegalTypes.insert({Idx, T}); + return Optional(&(LegalTypes[Idx])); + } + // Record illegal type index. + IllegalTypes.insert(Idx); + return llvm::None; +} + +void RVVEmitter::emitArchMacroAndBody( + std::vector> &Defs, raw_ostream &OS, + std::function DumpBody) { + + // Collect the same extension intrinsic in the one set for arch guard marco. 
+  // Look it up in the caches first.
+  auto It = LegalTypes.find(Idx);
+  if (It != LegalTypes.end())
+    return Optional<RVVTypePtr>(&(It->second));
+  if (IllegalTypes.count(Idx))
+    return llvm::None;
+  // Compute the type and record the result.
+  RVVType T(BT, ELMUL, Proto);
+  if (T.isValid()) {
+    // Record the legal type index and value.
+    LegalTypes.insert({Idx, T});
+    return Optional<RVVTypePtr>(&(LegalTypes[Idx]));
+  }
+  // Record the illegal type index.
+  IllegalTypes.insert(Idx);
+  return llvm::None;
+}
+
+void RVVEmitter::emitArchMacroAndBody(
+    std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
+    std::function<void(raw_ostream &, const RVVIntrinsic &)> DumpBody) {
+
+  // Group intrinsics that require the same extensions into one set so they
+  // can share a single arch guard macro.
+  DenseMap<uint8_t, SmallVector<std::unique_ptr<RVVIntrinsic>, 256>> DefsSet;
+  for (auto &Def : Defs) {
+    DefsSet[Def->getRISCV_Extensions()].push_back(std::move(Def));
+  }
+
+  for (const auto &KV : DefsSet) {
+    SmallVector<std::string, 8> ExtStrings = getExtStrings(KV.getFirst());
+    // Emit the arch guard predicates.
+    if (!ExtStrings.empty()) {
+      OS << "#if defined(" + ExtStrings[0] + ")";
+      for (unsigned i = 1; i < ExtStrings.size(); ++i)
+        OS << " || defined(" << ExtStrings[i] << ")";
+      OS << "\n";
+    }
+    for (auto &Def : KV.getSecond()) {
+      DumpBody(OS, *Def);
+    }
+    if (!ExtStrings.empty())
+      OS << "#endif\n\n";
+  }
+}
+
+SmallVector<std::string, 8> RVVEmitter::getExtStrings(uint8_t Extents) {
+  if (Extents == 0)
+    return {};
+  SmallVector<std::string, 8> ExtVector;
+  // D implies F, so an intrinsic that needs F is also available under D.
+  if (Extents & static_cast<uint8_t>(RISCV_Extension::F)) {
+    ExtVector.emplace_back("__riscv_f");
+    ExtVector.emplace_back("__riscv_d");
+  }
+  if (Extents & static_cast<uint8_t>(RISCV_Extension::D)) {
+    ExtVector.emplace_back("__riscv_d");
+  }
+  if (Extents & static_cast<uint8_t>(RISCV_Extension::ZFH)) {
+    ExtVector.emplace_back("__riscv_zfh");
+  }
+  return ExtVector;
+}
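+// For example, intrinsics that require only the F extension are emitted as
+//   #if defined(__riscv_f) || defined(__riscv_d)
+//   ...
+//   #endif
+// since D implies F.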
+
+namespace clang {
+void EmitRVVHeader(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createHeader(OS);
+}
+
+void EmitRVVGenericHeader(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createGenericHeader(OS);
+}
+
+void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createBuiltins(OS);
+}
+
+void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createCodeGen(OS);
+}
+
+void EmitRVVTest(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createTest(OS, /*IsGeneric=*/false);
+}
+
+void EmitRVVGenericTest(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createTest(OS, /*IsGeneric=*/true);
+}
+
+} // End namespace clang
diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
--- a/clang/utils/TableGen/TableGen.cpp
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -83,6 +83,12 @@
   GenArmCdeBuiltinSema,
   GenArmCdeBuiltinCG,
   GenArmCdeBuiltinAliases,
+  GenRISCVVectorHeader,
+  GenRISCVVectorGenericHeader,
+  GenRISCVVectorBuiltins,
+  GenRISCVVectorBuiltinCG,
+  GenRISCVVectorTest,
+  GenRISCVVectorGenericTest,
   GenAttrDocs,
   GenDiagDocs,
   GenOptDocs,
@@ -228,6 +234,19 @@
                "Generate ARM CDE builtin code-generator for clang"),
     clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases",
                "Generate list of valid ARM CDE builtin aliases for clang"),
+    clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header",
+               "Generate riscv_vector.h for clang"),
+    clEnumValN(GenRISCVVectorGenericHeader,
+               "gen-riscv-vector-generic-header",
+               "Generate riscv_vector_generic.h for clang"),
+    clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins",
+               "Generate riscv_vector_builtins.inc for clang"),
+    clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
+               "Generate riscv_vector_builtin_cg.inc for clang"),
+    clEnumValN(GenRISCVVectorTest, "gen-riscv-vector-test",
+               "Generate RISC-V Vector tests for clang"),
+    clEnumValN(GenRISCVVectorGenericTest, "gen-riscv-vector-generic-test",
+               "Generate RISC-V Vector generic tests for clang"),
     clEnumValN(GenAttrDocs, "gen-attr-docs",
                "Generate attribute documentation"),
     clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -428,6 +447,24 @@
   case GenArmCdeBuiltinAliases:
     EmitCdeBuiltinAliases(Records, OS);
     break;
+  case GenRISCVVectorHeader:
+    EmitRVVHeader(Records, OS);
+    break;
+  case GenRISCVVectorGenericHeader:
+    EmitRVVGenericHeader(Records, OS);
+    break;
+  case GenRISCVVectorBuiltins:
+    EmitRVVBuiltins(Records, OS);
+    break;
+  case GenRISCVVectorBuiltinCG:
+    EmitRVVBuiltinCG(Records, OS);
+    break;
+  case GenRISCVVectorTest:
+    EmitRVVTest(Records, OS);
+    break;
+  case GenRISCVVectorGenericTest:
+    EmitRVVGenericTest(Records, OS);
+    break;
   case GenAttrDocs:
     EmitClangAttrDocs(Records, OS);
     break;
diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h
--- a/clang/utils/TableGen/TableGenBackends.h
+++ b/clang/utils/TableGen/TableGenBackends.h
@@ -106,6 +106,13 @@
 void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 
+void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVGenericHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVTest(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVGenericTest(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
 void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
diff --git a/clang/utils/TestUtils/gen-riscv-v-tests.sh b/clang/utils/TestUtils/gen-riscv-v-tests.sh
new file mode 100755
--- /dev/null
+++ b/clang/utils/TestUtils/gen-riscv-v-tests.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Generate riscv-v vector intrinsic tests in clang/test/CodeGen/RISCV/riscv-rvv-intrinsics
+# and clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic
+#
+# Usage: gen-riscv-v-tests.sh path/to/llvm/bin
+#
+# 1. Use 'clang-tblgen' to read clang/include/clang/Basic/riscv_vector.td
+#    and generate the tests.
+# 2. Use update_cc_test_checks.py to update the expected results.
+
+if [ "$#" -ne 1 ]; then
+  echo "Usage: ./gen-riscv-v-tests.sh path/to/llvm/bin"
+  exit 1
+fi
+
+src_path="$(dirname $(realpath $0))/../../../"
+bin_path=$1
+
+gen_tests() {
+  # op_list holds the macro names used in riscv_vector.td.
+  local op_list="VADD VFADD"
+  local option="$1"
+  local suffix="$2"
+  local path="${src_path}/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics${suffix}"
+  if [[ ! -d $path ]]; then
+    mkdir -p $path
+  fi
+  for op in ${op_list}; do
+    local file=${path}/${op,,}.c
+    ${bin_path}/clang-tblgen $option ${src_path}/clang/include/clang/Basic/riscv_vector.td -o=${file} --write-if-changed -D=${op}
+    if [ -s ${file} ]; then
+      ${src_path}/llvm/utils/update_cc_test_checks.py --llvm-bin=${bin_path} ${file}
+    else
+      rm ${file}
+    fi
+  done
+}
+
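+# For example, the first gen_tests call below runs (paths abbreviated):
+#   clang-tblgen -gen-riscv-vector-test .../riscv_vector.td \
+#     -o=.../riscv-rvv-intrinsics/vadd.c --write-if-changed -D=VADD
+# and then runs update_cc_test_checks.py on the generated file.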
+gen_tests "-gen-riscv-vector-test" ""
+gen_tests "-gen-riscv-vector-generic-test" "-generic"
diff --git a/clang/utils/TestUtils/gen-rvv-tests.py b/clang/utils/TestUtils/gen-rvv-tests.py
new file mode 100755
--- /dev/null
+++ b/clang/utils/TestUtils/gen-rvv-tests.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+
+'''
+  Generate RISC-V V intrinsic tests in clang/test/CodeGen/RISCV/riscv-rvv-intrinsics
+  and clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic
+
+  Usage:
+
+% clang/utils/TestUtils/gen-rvv-tests.py path/to/llvm/bin
+
+  1. Use 'clang-tblgen' to read clang/include/clang/Basic/riscv_vector.td
+     and generate tests in clang/test/CodeGen/RISCV/riscv-rvv-intrinsics and
+     clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic
+  2. Use update_cc_test_checks.py to update the expected results.
+'''
+
+import os
+import subprocess
+import sys
+import importlib.util
+
+# op_list holds the macro names used in riscv_vector.td.
+op_list = ['VADD', 'VFADD']
+
+
+def gen_tests(option, tests_path, update_cc_test_checks, update_cc_argv, args, suffix=''):
+    tests_folder = os.path.join(tests_path, 'riscv-rvv-intrinsics%s' % suffix)
+    for op in op_list:
+        out_file = os.path.join(tests_folder, '%s.c' % op.lower())
+        new_args = args + ['-D=%s' % op, option]
+        output = subprocess.check_output(new_args)
+        if len(output) > 0:
+            with open(out_file, 'wb') as f:
+                f.write(output)
+            sys.argv = update_cc_argv + [out_file]
+            update_cc_test_checks.main()
+
+
+def main():
+    llvm_bin = sys.argv[1]
+    file_path = os.path.abspath(os.path.dirname(__file__))
+    llvm_project_path = os.path.join(file_path, '..', '..', '..')
+    td_path = os.path.join(llvm_project_path, 'clang',
+                           'include', 'clang', 'Basic', 'riscv_vector.td')
+    tests_path = os.path.join(llvm_project_path, 'clang', 'test',
+                              'CodeGen', 'RISCV')
+    update_cc_test_checks_file = os.path.join(
+        llvm_project_path, 'llvm', 'utils', 'update_cc_test_checks.py')
+    # update_cc_test_checks depends on UpdateTestChecks in llvm/utils.
+    sys.path.insert(0, os.path.join(llvm_project_path, 'llvm', 'utils'))
+    spec = importlib.util.spec_from_file_location(
+        'update_cc_test_checks', update_cc_test_checks_file)
+    update_cc_test_checks = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(update_cc_test_checks)
+    update_cc_argv = [update_cc_test_checks_file, '--llvm-bin=%s' % llvm_bin]
+    args = ['%s/clang-tblgen' % llvm_bin, td_path]
+    gen_tests('-gen-riscv-vector-test', tests_path, update_cc_test_checks, update_cc_argv,
+              args)
+    gen_tests('-gen-riscv-vector-generic-test', tests_path, update_cc_test_checks, update_cc_argv,
+              args, '-generic')
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/llvm/docs/CommandGuide/tblgen.rst b/llvm/docs/CommandGuide/tblgen.rst
--- a/llvm/docs/CommandGuide/tblgen.rst
+++ b/llvm/docs/CommandGuide/tblgen.rst
@@ -138,7 +138,7 @@
 
 .. option:: -gen-compress-inst-emitter
 
-  Generate RISCV compressed instructions.
+  Generate RISC-V compressed instructions.
 
 .. option:: -gen-ctags
 
@@ -541,6 +541,30 @@
 
   Generate list of valid ARM CDE builtin aliases for Clang.
 
+.. option:: -gen-riscv-vector-header
+
+  Generate ``riscv_vector.h`` for Clang.
+
+.. option:: -gen-riscv-vector-generic-header
+
+  Generate ``riscv_vector_generic.h`` for Clang.
+
+.. option:: -gen-riscv-vector-builtins
+
+  Generate ``riscv_vector_builtins.inc`` for Clang.
+
+.. option:: -gen-riscv-vector-builtin-codegen
+
+  Generate ``riscv_vector_builtin_cg.inc`` for Clang.
+
+.. option:: -gen-riscv-vector-test
+
+  Generate RISC-V Vector tests for Clang.
+
+.. option:: -gen-riscv-vector-generic-test
+
+  Generate RISC-V Vector generic tests for Clang.
+
 .. option:: -gen-attr-docs
 
   Generate attribute documentation.