diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def --- a/clang/include/clang/Basic/BuiltinsRISCV.def +++ b/clang/include/clang/Basic/BuiltinsRISCV.def @@ -1,183 +1,3 @@ -#if defined(BUILTIN) && !defined(RISCVV_BUILTIN) -#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS) -#endif -RISCVV_BUILTIN(vadd_vv_i8m1_vl, "q8Scq8Scq8Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m1_m_vl, "q8Scq8bq8Scq8Scq8Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m1_vl, "q4Ssq4Ssq4Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m1_m_vl, "q4Ssq4bq4Ssq4Ssq4Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m1_m_vl, "q2Siq2bq2Siq2Siq2Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m1_vl, "q1SWiq1SWiq1SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiq1SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m2_vl, "q16Scq16Scq16Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m2_m_vl, "q16Scq16bq16Scq16Scq16Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m2_vl, "q8Ssq8Ssq8Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m2_m_vl, "q8Ssq8bq8Ssq8Ssq8Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m2_vl, "q4Siq4Siq4Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m2_m_vl, "q4Siq4bq4Siq4Siq4Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m2_vl, "q2SWiq2SWiq2SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiq2SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m4_vl, "q32Scq32Scq32Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m4_m_vl, "q32Scq32bq32Scq32Scq32Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m4_vl, "q16Ssq16Ssq16Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m4_m_vl, "q16Ssq16bq16Ssq16Ssq16Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m4_vl, "q8Siq8Siq8Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m4_m_vl, "q8Siq8bq8Siq8Siq8Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m4_vl, "q4SWiq4SWiq4SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiq4SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8m8_vl, "q64Scq64Scq64Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8m8_m_vl, "q64Scq64bq64Scq64Scq64Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16m8_vl, "q32Ssq32Ssq32Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16m8_m_vl, "q32Ssq32bq32Ssq32Ssq32Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32m8_vl, "q16Siq16Siq16Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32m8_m_vl, "q16Siq16bq16Siq16Siq16Siz", "n") -RISCVV_BUILTIN(vadd_vv_i64m8_vl, "q8SWiq8SWiq8SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiq8SWiz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf2_vl, "q4Scq4Scq4Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf2_m_vl, "q4Scq4bq4Scq4Scq4Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf2_vl, "q2Ssq2Ssq2Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf2_m_vl, "q2Ssq2bq2Ssq2Ssq2Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i32mf2_vl, "q1Siq1Siq1Siz", "n") -RISCVV_BUILTIN(vadd_vv_i32mf2_m_vl, "q1Siq1bq1Siq1Siq1Siz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf4_vl, "q2Scq2Scq2Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf4_m_vl, "q2Scq2bq2Scq2Scq2Scz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf4_vl, "q1Ssq1Ssq1Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i16mf4_m_vl, "q1Ssq1bq1Ssq1Ssq1Ssz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf8_vl, "q1Scq1Scq1Scz", "n") -RISCVV_BUILTIN(vadd_vv_i8mf8_m_vl, "q1Scq1bq1Scq1Scq1Scz", "n") -RISCVV_BUILTIN(vadd_vx_i8m1_vl, "q8Scq8ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m1_m_vl, "q8Scq8bq8Scq8ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m1_vl, "q4Ssq4SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m1_m_vl, "q4Ssq4bq4Ssq4SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m1_vl, "q2Siq2SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m1_m_vl, "q2Siq2bq2Siq2SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m1_vl, "q1SWiq1SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiSWiz", "n") 
-RISCVV_BUILTIN(vadd_vx_i8m2_vl, "q16Scq16ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m2_m_vl, "q16Scq16bq16Scq16ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m2_vl, "q8Ssq8SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m2_m_vl, "q8Ssq8bq8Ssq8SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m2_vl, "q4Siq4SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m2_m_vl, "q4Siq4bq4Siq4SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m2_vl, "q2SWiq2SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8m4_vl, "q32Scq32ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m4_m_vl, "q32Scq32bq32Scq32ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m4_vl, "q16Ssq16SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m4_m_vl, "q16Ssq16bq16Ssq16SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m4_vl, "q8Siq8SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m4_m_vl, "q8Siq8bq8Siq8SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m4_vl, "q4SWiq4SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8m8_vl, "q64Scq64ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8m8_m_vl, "q64Scq64bq64Scq64ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16m8_vl, "q32Ssq32SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16m8_m_vl, "q32Ssq32bq32Ssq32SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32m8_vl, "q16Siq16SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32m8_m_vl, "q16Siq16bq16Siq16SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m8_vl, "q8SWiq8SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiSWiz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf2_vl, "q4Scq4ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf2_m_vl, "q4Scq4bq4Scq4ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf2_vl, "q2Ssq2SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf2_m_vl, "q2Ssq2bq2Ssq2SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i32mf2_vl, "q1Siq1SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i32mf2_m_vl, "q1Siq1bq1Siq1SiSiz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf4_vl, "q2Scq2ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf4_m_vl, "q2Scq2bq2Scq2ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf4_vl, "q1Ssq1SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i16mf4_m_vl, "q1Ssq1bq1Ssq1SsSsz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf8_vl, "q1Scq1ScScz", "n") -RISCVV_BUILTIN(vadd_vx_i8mf8_m_vl, "q1Scq1bq1Scq1ScScz", "n") -RISCVV_BUILTIN(vadd_vv_u8m1_vl, "q8Ucq8Ucq8Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m1_m_vl, "q8Ucq8bq8Ucq8Ucq8Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m1_vl, "q4Usq4Usq4Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m1_m_vl, "q4Usq4bq4Usq4Usq4Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m1_vl, "q2Uiq2Uiq2Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m1_m_vl, "q2Uiq2bq2Uiq2Uiq2Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m1_vl, "q1UWiq1UWiq1UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiq1UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m2_vl, "q16Ucq16Ucq16Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m2_m_vl, "q16Ucq16bq16Ucq16Ucq16Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m2_vl, "q8Usq8Usq8Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m2_m_vl, "q8Usq8bq8Usq8Usq8Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m2_vl, "q4Uiq4Uiq4Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m2_m_vl, "q4Uiq4bq4Uiq4Uiq4Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m2_vl, "q2UWiq2UWiq2UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiq2UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m4_vl, "q32Ucq32Ucq32Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m4_m_vl, "q32Ucq32bq32Ucq32Ucq32Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m4_vl, "q16Usq16Usq16Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m4_m_vl, "q16Usq16bq16Usq16Usq16Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m4_vl, "q8Uiq8Uiq8Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m4_m_vl, "q8Uiq8bq8Uiq8Uiq8Uiz", "n") 
-RISCVV_BUILTIN(vadd_vv_u64m4_vl, "q4UWiq4UWiq4UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiq4UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8m8_vl, "q64Ucq64Ucq64Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8m8_m_vl, "q64Ucq64bq64Ucq64Ucq64Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16m8_vl, "q32Usq32Usq32Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16m8_m_vl, "q32Usq32bq32Usq32Usq32Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32m8_vl, "q16Uiq16Uiq16Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32m8_m_vl, "q16Uiq16bq16Uiq16Uiq16Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m8_vl, "q8UWiq8UWiq8UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiq8UWiz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf2_vl, "q4Ucq4Ucq4Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf2_m_vl, "q4Ucq4bq4Ucq4Ucq4Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf2_vl, "q2Usq2Usq2Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf2_m_vl, "q2Usq2bq2Usq2Usq2Usz", "n") -RISCVV_BUILTIN(vadd_vv_u32mf2_vl, "q1Uiq1Uiq1Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u32mf2_m_vl, "q1Uiq1bq1Uiq1Uiq1Uiz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf4_vl, "q2Ucq2Ucq2Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf4_m_vl, "q2Ucq2bq2Ucq2Ucq2Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf4_vl, "q1Usq1Usq1Usz", "n") -RISCVV_BUILTIN(vadd_vv_u16mf4_m_vl, "q1Usq1bq1Usq1Usq1Usz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf8_vl, "q1Ucq1Ucq1Ucz", "n") -RISCVV_BUILTIN(vadd_vv_u8mf8_m_vl, "q1Ucq1bq1Ucq1Ucq1Ucz", "n") -RISCVV_BUILTIN(vadd_vx_u8m1_vl, "q8Ucq8UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m1_m_vl, "q8Ucq8bq8Ucq8UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m1_vl, "q4Usq4UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m1_m_vl, "q4Usq4bq4Usq4UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m1_vl, "q2Uiq2UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m1_m_vl, "q2Uiq2bq2Uiq2UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m1_vl, "q1UWiq1UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m2_vl, "q16Ucq16UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m2_m_vl, "q16Ucq16bq16Ucq16UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m2_vl, "q8Usq8UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m2_m_vl, "q8Usq8bq8Usq8UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m2_vl, "q4Uiq4UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m2_m_vl, "q4Uiq4bq4Uiq4UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m2_vl, "q2UWiq2UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m4_vl, "q32Ucq32UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m4_m_vl, "q32Ucq32bq32Ucq32UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m4_vl, "q16Usq16UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m4_m_vl, "q16Usq16bq16Usq16UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m4_vl, "q8Uiq8UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m4_m_vl, "q8Uiq8bq8Uiq8UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m4_vl, "q4UWiq4UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8m8_vl, "q64Ucq64UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8m8_m_vl, "q64Ucq64bq64Ucq64UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16m8_vl, "q32Usq32UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16m8_m_vl, "q32Usq32bq32Usq32UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u32m8_vl, "q16Uiq16UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u32m8_m_vl, "q16Uiq16bq16Uiq16UiUiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m8_vl, "q8UWiq8UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiUWiz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf2_vl, "q4Ucq4UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u8mf2_m_vl, "q4Ucq4bq4Ucq4UcUcz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf2_vl, "q2Usq2UsUsz", "n") -RISCVV_BUILTIN(vadd_vx_u16mf2_m_vl, "q2Usq2bq2Usq2UsUsz", "n") 
-RISCVV_BUILTIN(vadd_vx_u32mf2_vl, "q1Uiq1UiUiz", "n")
-RISCVV_BUILTIN(vadd_vx_u32mf2_m_vl, "q1Uiq1bq1Uiq1UiUiz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf4_vl, "q2Ucq2UcUcz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf4_m_vl, "q2Ucq2bq2Ucq2UcUcz", "n")
-RISCVV_BUILTIN(vadd_vx_u16mf4_vl, "q1Usq1UsUsz", "n")
-RISCVV_BUILTIN(vadd_vx_u16mf4_m_vl, "q1Usq1bq1Usq1UsUsz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf8_vl, "q1Ucq1UcUcz", "n")
-RISCVV_BUILTIN(vadd_vx_u8mf8_m_vl, "q1Ucq1bq1Ucq1UcUcz", "n")
+#include "clang/Basic/riscv_vector_builtins.inc"
-#undef BUILTIN
-#undef RISCVV_BUILTIN
diff --git a/clang/include/clang/Basic/CMakeLists.txt b/clang/include/clang/Basic/CMakeLists.txt
--- a/clang/include/clang/Basic/CMakeLists.txt
+++ b/clang/include/clang/Basic/CMakeLists.txt
@@ -84,3 +84,9 @@ clang_tablegen(arm_cde_builtin_aliases.inc -gen-arm-cde-builtin-aliases
   SOURCE arm_cde.td
   TARGET ClangARMCdeBuiltinAliases)
+clang_tablegen(riscv_vector_builtins.inc -gen-riscv-vector-builtins -D=ALL
+  SOURCE riscv_vector.td
+  TARGET ClangRISCVVectorBuiltins)
+clang_tablegen(riscv_vector_builtin_cg.inc -gen-riscv-vector-builtin-codegen -D=ALL
+  SOURCE riscv_vector.td
+  TARGET ClangRISCVVectorBuiltinCG)
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
new file mode 100644
--- /dev/null
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -0,0 +1,223 @@
+//==--- riscv_vector.td - RISC-V V-extension builtin function list --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for the RISC-V V-extension. See:
+//
+//   https://github.com/riscv/rvv-intrinsic-doc
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Each record of the class RVVBuiltin defines a collection of builtins (e.g.
+// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
+// "vadd_vv_i32m2", etc).
+//
+// The elements of this collection are defined by an instantiation process
+// whose range is the cross product of the LMUL attribute and every element of
+// the TypeRange attribute. By default builtins have LMUL = [1, 2, 4, 8, -2,
+// -4, -8], so the process is repeated 7 times. A negative LMUL -x in that
+// list is used to represent the fractional LMUL 1/x.
+//
+// LMUL represents the fact that the types of values used by that builtin are
+// values generated by instructions that are executed under that LMUL.
+// However, this does not mean the builtin is necessarily lowered into an
+// instruction that executes under the specified LMUL. An example where this
+// happens is loads and stores of masks. A mask like `vbool8_t` can be
+// generated, for instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1)
+// or by comparing two `__rvv_int16m2_t` (this is LMUL=2). The actual load or
+// store, however, will be performed under LMUL=1 because mask registers are
+// not grouped.
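+//
+// As an illustration (inferred from the hand-written list that this patch
+// removes from BuiltinsRISCV.def, not from the generator itself), a record
+// with TypeRange "csil" and the default LMUL list does not necessarily
+// produce all 7 variants for every type: i8 gets m1-m8 plus mf2, mf4 and
+// mf8, but i64 gets no fractional variants at all, presumably because a
+// fractional LMUL is only valid for sufficiently narrow element types.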
+//
+// TypeRange is a non-empty sequence of basic types:
+//
+//   c: signed char (8-bit)
+//   s: short (16-bit)
+//   i: int (32-bit)
+//   l: long (64-bit)
+//   h: half (16-bit)
+//   f: float (32-bit)
+//   d: double (64-bit)
+//
+// This way, given an LMUL, a record with a TypeRange "sil" will cause the
+// definition of 3 builtins. Each type "t" in the TypeRange (in this example
+// short, int, and long) is used as a parameter that drives the definition of
+// that particular builtin (for the given LMUL).
+//
+// During the instantiation, types can be transformed or modified using type
+// transformers. Given a type "t", the following primitive type transformers
+// can be applied to it to yield another type.
+//
+//   e: type of "t" as is (identity)
+//   v: computes a vector type whose element type is "t" for the current LMUL
+//   w: computes a vector type identical to what 'v' computes except for the
+//      element type, which is twice as wide as the element type of 'v'
+//   q: computes a vector type identical to what 'v' computes except for the
+//      element type, which is four times as wide as the element type of 'v'
+//   o: computes a vector type identical to what 'v' computes except for the
+//      element type, which is eight times as wide as the element type of 'v'
+//   m: computes a vector type identical to what 'v' computes except for the
+//      element type, which is bool
+//   0: void type, ignores "t"
+//   z: size_t, ignores "t"
+//   t: ptrdiff_t, ignores "t"
+//   c: uint8_t, ignores "t"
+//
+// So, for instance, if t is "i", i.e. int, then "e" will yield int again, and
+// "v" will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
+// Accordingly, "w" would yield __rvv_int64m2_t.
+//
+// A type transformer can be prefixed by other non-primitive type transformers.
+//
+//   P: constructs a pointer to the current type
+//   C: adds const to the type
+//   K: requires the integer type to be a constant expression
+//   U: given an integer type or vector type, computes its unsigned variant
+//   I: given a vector type, computes the vector type with integer type
+//      elements of the same width
+//   F: given a vector type, computes the vector type with floating-point type
+//      elements of the same width
+//   W: widens an integer or float type; cannot be used on vectors
+//   S: given a vector type, computes its equivalent one for LMUL=1. This is a
+//      no-op if the vector was already LMUL=1
+//
+// Following the example above, if t is "i", then "Ue" will yield unsigned int
+// and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), "Fw" would
+// yield __rvv_float64m2_t, etc.
+//
+// Each builtin is then defined by applying each type in TypeRange against the
+// sequence of type transformers described in Suffix and Prototype.
+//
+// The name of the builtin is defined by the Name attribute (which defaults to
+// the name of the class) with the Suffix attribute appended, separated by an
+// underscore. For instance, with Name="foo", Suffix="v" and TypeRange="il",
+// the builtins generated will be __builtin_rvv_foo_i32m1 and
+// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
+// type transformer (say "vv"), each of the types is separated with an
+// underscore, as in "__builtin_rvv_foo_i32m1_i32m1".
+//
+// The C/C++ prototype of the builtin is defined by the Prototype attribute.
+// Prototype is a non-empty sequence of type transformers, the first of which
+// is the return type of the builtin and the rest of which are the parameters
+// of the builtin, in order.
+// For instance, if Prototype is "wvv" and TypeRange is "si", the first
+// builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
+//
+// There are a number of attributes that are used to constrain the number and
+// shape of the builtins generated. Refer to the comments below for them.
+class RVVBuiltin<string suffix, string prototype, string type_range,
+                 string mangled_suffix = ""> {
+  // Base name that will be prefixed with "__builtin_rvv_" and suffixed with
+  // the computed Suffix.
+  string Name = NAME;
+
+  // If not empty, each instantiated builtin will have this appended after an
+  // underscore (_). Suffix is instantiated like Prototype.
+  string Suffix = suffix;
+
+  // If not empty, each instantiated mangled builtin will have this appended
+  // after an underscore (_). It is instantiated like Prototype.
+  string MangledSuffix = mangled_suffix;
+
+  // For each type described in TypeRange we instantiate this Prototype.
+  string Prototype = prototype;
+
+  // The different variants of the builtin, parameterised with a type.
+  string TypeRange = type_range;
+
+  // This builtin has a masked form.
+  bit HasMask = 1;
+
+  // If HasMask == 1, this flag states that this builtin has a first merge
+  // operand.
+  bit HasMergeOperand = 1;
+
+  // This builtin takes an explicit vector length parameter in the last
+  // position.
+  bit HasVL = 1;
+
+  // This builtin supports function overloading and has a mangled name.
+  bit HasGeneric = 1;
+
+  // Reads or writes "memory".
+  bit HasSideEffects = 0;
+
+  // This builtin is valid for the given LMULs.
+  list<int> LMUL = [1, 2, 4, 8, -2, -4, -8];
+
+  // Used to emit the automatic clang codegen. It describes what types we have
+  // to use to obtain the specific LLVM intrinsic.
+  //
+  // -1 means the return type; otherwise, k >= 0 means the k-th operand
+  // (counting from zero) of the codegen'd parameters of the unmasked version.
+  // k can't be the mask operand's position.
+  list<int> IntrinsicTypes = [];
+
+  // If these names are not empty, this is the ID of the LLVM intrinsic
+  // we want to lower to.
+  string IRName = NAME;
+  string IRNameMask = NAME # "_mask";
+}
+
+//===----------------------------------------------------------------------===//
+// Basic classes with automatic codegen.
+//===----------------------------------------------------------------------===//
+class RVVBinBuiltin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [-1, 1];
+}
+
+multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
+                            list<list<string>> suffixes_prototypes> {
+  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+    foreach s_p = suffixes_prototypes in {
+      let Name = NAME # "_" # !head(s_p) in {
+        defvar suffix = !head(!tail(s_p));
+        defvar prototype = !head(!tail(!tail(s_p)));
+        def : RVVBinBuiltin<suffix, prototype, type_range>;
+      }
+    }
+  }
+}
+
+// The `ALL` macro should be defined in every gen-riscv-* target except
+// gen-riscv-vector-test.
+// gen-riscv-v-tests.sh defines each macro to generate each intrinsic's tests
+// in a separate file. This means that adding a new definition also requires
+// updating op_list in gen-riscv-v-tests.sh.
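+
+// For reference, a sketch (not part of the generator's own documentation) of
+// what the vadd records below expand to, with names and C prototypes taken
+// from the hand-written list this patch removes from BuiltinsRISCV.def,
+// shown for LMUL=1 and type "i" only:
+//
+//   __builtin_rvv_vadd_vv_i32m1_vl:
+//     vint32m1_t (vint32m1_t op1, vint32m1_t op2, size_t vl)
+//
+//   __builtin_rvv_vadd_vv_i32m1_m_vl (HasMask and HasMergeOperand contribute
+//   the leading mask and merge parameters):
+//     vint32m1_t (vbool32_t mask, vint32m1_t merge,
+//                 vint32m1_t op1, vint32m1_t op2, size_t vl)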
+#ifdef ALL +#define VADD +#define VFADD +#endif + +// Vector Integer Arithmetic Operations +// Vector Single-Width Integer Add and Subtract +#ifdef VADD +defm vadd : RVVBinBuiltinSet<"vadd", "csil", + [["vv", "v", "vvv"], + ["vx", "v", "vve"], + ["vv", "Uv", "UvUvUv"], + ["vx", "Uv", "UvUvUe"]]>; +#endif + +#ifdef VFADD +defm vfadd : RVVBinBuiltinSet<"vfadd", "fd", + [["vv", "v", "vvv"], + ["vf", "v", "vve"]]>; +#endif diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -17567,196 +17567,7 @@ // Required for overloaded intrinsics. llvm::SmallVector IntrinsicTypes; switch (BuiltinID) { - // We could generate all the possible combinations and handling code in - // a file and include it here, instead of listing all the builtins plainly. - // Something like - // #include clang/Basic/RISCVVBuiltinCodeGen.inc - case RISCV::BIvadd_vv_i8m1_vl: - case RISCV::BIvadd_vv_i16m1_vl: - case RISCV::BIvadd_vv_i32m1_vl: - case RISCV::BIvadd_vv_i64m1_vl: - case RISCV::BIvadd_vv_i8m2_vl: - case RISCV::BIvadd_vv_i16m2_vl: - case RISCV::BIvadd_vv_i32m2_vl: - case RISCV::BIvadd_vv_i64m2_vl: - case RISCV::BIvadd_vv_i8m4_vl: - case RISCV::BIvadd_vv_i16m4_vl: - case RISCV::BIvadd_vv_i32m4_vl: - case RISCV::BIvadd_vv_i64m4_vl: - case RISCV::BIvadd_vv_i8m8_vl: - case RISCV::BIvadd_vv_i16m8_vl: - case RISCV::BIvadd_vv_i32m8_vl: - case RISCV::BIvadd_vv_i64m8_vl: - case RISCV::BIvadd_vv_i8mf2_vl: - case RISCV::BIvadd_vv_i16mf2_vl: - case RISCV::BIvadd_vv_i32mf2_vl: - case RISCV::BIvadd_vv_i8mf4_vl: - case RISCV::BIvadd_vv_i16mf4_vl: - case RISCV::BIvadd_vv_i8mf8_vl: - case RISCV::BIvadd_vx_i8m1_vl: - case RISCV::BIvadd_vx_i16m1_vl: - case RISCV::BIvadd_vx_i32m1_vl: - case RISCV::BIvadd_vx_i64m1_vl: - case RISCV::BIvadd_vx_i8m2_vl: - case RISCV::BIvadd_vx_i16m2_vl: - case RISCV::BIvadd_vx_i32m2_vl: - case RISCV::BIvadd_vx_i64m2_vl: - case RISCV::BIvadd_vx_i8m4_vl: - case RISCV::BIvadd_vx_i16m4_vl: - case RISCV::BIvadd_vx_i32m4_vl: - case RISCV::BIvadd_vx_i64m4_vl: - case RISCV::BIvadd_vx_i8m8_vl: - case RISCV::BIvadd_vx_i16m8_vl: - case RISCV::BIvadd_vx_i32m8_vl: - case RISCV::BIvadd_vx_i64m8_vl: - case RISCV::BIvadd_vx_i8mf2_vl: - case RISCV::BIvadd_vx_i16mf2_vl: - case RISCV::BIvadd_vx_i32mf2_vl: - case RISCV::BIvadd_vx_i8mf4_vl: - case RISCV::BIvadd_vx_i16mf4_vl: - case RISCV::BIvadd_vx_i8mf8_vl: - case RISCV::BIvadd_vv_u8m1_vl: - case RISCV::BIvadd_vv_u16m1_vl: - case RISCV::BIvadd_vv_u32m1_vl: - case RISCV::BIvadd_vv_u64m1_vl: - case RISCV::BIvadd_vv_u8m2_vl: - case RISCV::BIvadd_vv_u16m2_vl: - case RISCV::BIvadd_vv_u32m2_vl: - case RISCV::BIvadd_vv_u64m2_vl: - case RISCV::BIvadd_vv_u8m4_vl: - case RISCV::BIvadd_vv_u16m4_vl: - case RISCV::BIvadd_vv_u32m4_vl: - case RISCV::BIvadd_vv_u64m4_vl: - case RISCV::BIvadd_vv_u8m8_vl: - case RISCV::BIvadd_vv_u16m8_vl: - case RISCV::BIvadd_vv_u32m8_vl: - case RISCV::BIvadd_vv_u64m8_vl: - case RISCV::BIvadd_vv_u8mf2_vl: - case RISCV::BIvadd_vv_u16mf2_vl: - case RISCV::BIvadd_vv_u32mf2_vl: - case RISCV::BIvadd_vv_u8mf4_vl: - case RISCV::BIvadd_vv_u16mf4_vl: - case RISCV::BIvadd_vv_u8mf8_vl: - case RISCV::BIvadd_vx_u8m1_vl: - case RISCV::BIvadd_vx_u16m1_vl: - case RISCV::BIvadd_vx_u32m1_vl: - case RISCV::BIvadd_vx_u64m1_vl: - case RISCV::BIvadd_vx_u8m2_vl: - case RISCV::BIvadd_vx_u16m2_vl: - case RISCV::BIvadd_vx_u32m2_vl: - case RISCV::BIvadd_vx_u64m2_vl: - case RISCV::BIvadd_vx_u8m4_vl: - case RISCV::BIvadd_vx_u16m4_vl: - case RISCV::BIvadd_vx_u32m4_vl: - case 
RISCV::BIvadd_vx_u64m4_vl: - case RISCV::BIvadd_vx_u8m8_vl: - case RISCV::BIvadd_vx_u16m8_vl: - case RISCV::BIvadd_vx_u32m8_vl: - case RISCV::BIvadd_vx_u64m8_vl: - case RISCV::BIvadd_vx_u8mf2_vl: - case RISCV::BIvadd_vx_u16mf2_vl: - case RISCV::BIvadd_vx_u32mf2_vl: - case RISCV::BIvadd_vx_u8mf4_vl: - case RISCV::BIvadd_vx_u16mf4_vl: - case RISCV::BIvadd_vx_u8mf8_vl: - // The order of operands is (op1, op2, vl). - ID = Intrinsic::riscv_vadd; - IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()}; - break; - case RISCV::BIvadd_vv_i8m1_m_vl: - case RISCV::BIvadd_vv_i16m1_m_vl: - case RISCV::BIvadd_vv_i32m1_m_vl: - case RISCV::BIvadd_vv_i64m1_m_vl: - case RISCV::BIvadd_vv_i8m2_m_vl: - case RISCV::BIvadd_vv_i16m2_m_vl: - case RISCV::BIvadd_vv_i32m2_m_vl: - case RISCV::BIvadd_vv_i64m2_m_vl: - case RISCV::BIvadd_vv_i8m4_m_vl: - case RISCV::BIvadd_vv_i16m4_m_vl: - case RISCV::BIvadd_vv_i32m4_m_vl: - case RISCV::BIvadd_vv_i64m4_m_vl: - case RISCV::BIvadd_vv_i8m8_m_vl: - case RISCV::BIvadd_vv_i16m8_m_vl: - case RISCV::BIvadd_vv_i32m8_m_vl: - case RISCV::BIvadd_vv_i64m8_m_vl: - case RISCV::BIvadd_vv_i8mf2_m_vl: - case RISCV::BIvadd_vv_i16mf2_m_vl: - case RISCV::BIvadd_vv_i32mf2_m_vl: - case RISCV::BIvadd_vv_i8mf4_m_vl: - case RISCV::BIvadd_vv_i16mf4_m_vl: - case RISCV::BIvadd_vv_i8mf8_m_vl: - case RISCV::BIvadd_vx_i8m1_m_vl: - case RISCV::BIvadd_vx_i16m1_m_vl: - case RISCV::BIvadd_vx_i32m1_m_vl: - case RISCV::BIvadd_vx_i64m1_m_vl: - case RISCV::BIvadd_vx_i8m2_m_vl: - case RISCV::BIvadd_vx_i16m2_m_vl: - case RISCV::BIvadd_vx_i32m2_m_vl: - case RISCV::BIvadd_vx_i64m2_m_vl: - case RISCV::BIvadd_vx_i8m4_m_vl: - case RISCV::BIvadd_vx_i16m4_m_vl: - case RISCV::BIvadd_vx_i32m4_m_vl: - case RISCV::BIvadd_vx_i64m4_m_vl: - case RISCV::BIvadd_vx_i8m8_m_vl: - case RISCV::BIvadd_vx_i16m8_m_vl: - case RISCV::BIvadd_vx_i32m8_m_vl: - case RISCV::BIvadd_vx_i64m8_m_vl: - case RISCV::BIvadd_vx_i8mf2_m_vl: - case RISCV::BIvadd_vx_i16mf2_m_vl: - case RISCV::BIvadd_vx_i32mf2_m_vl: - case RISCV::BIvadd_vx_i8mf4_m_vl: - case RISCV::BIvadd_vx_i16mf4_m_vl: - case RISCV::BIvadd_vx_i8mf8_m_vl: - case RISCV::BIvadd_vv_u8m1_m_vl: - case RISCV::BIvadd_vv_u16m1_m_vl: - case RISCV::BIvadd_vv_u32m1_m_vl: - case RISCV::BIvadd_vv_u64m1_m_vl: - case RISCV::BIvadd_vv_u8m2_m_vl: - case RISCV::BIvadd_vv_u16m2_m_vl: - case RISCV::BIvadd_vv_u32m2_m_vl: - case RISCV::BIvadd_vv_u64m2_m_vl: - case RISCV::BIvadd_vv_u8m4_m_vl: - case RISCV::BIvadd_vv_u16m4_m_vl: - case RISCV::BIvadd_vv_u32m4_m_vl: - case RISCV::BIvadd_vv_u64m4_m_vl: - case RISCV::BIvadd_vv_u8m8_m_vl: - case RISCV::BIvadd_vv_u16m8_m_vl: - case RISCV::BIvadd_vv_u32m8_m_vl: - case RISCV::BIvadd_vv_u64m8_m_vl: - case RISCV::BIvadd_vv_u8mf2_m_vl: - case RISCV::BIvadd_vv_u16mf2_m_vl: - case RISCV::BIvadd_vv_u32mf2_m_vl: - case RISCV::BIvadd_vv_u8mf4_m_vl: - case RISCV::BIvadd_vv_u16mf4_m_vl: - case RISCV::BIvadd_vv_u8mf8_m_vl: - case RISCV::BIvadd_vx_u8m1_m_vl: - case RISCV::BIvadd_vx_u16m1_m_vl: - case RISCV::BIvadd_vx_u32m1_m_vl: - case RISCV::BIvadd_vx_u64m1_m_vl: - case RISCV::BIvadd_vx_u8m2_m_vl: - case RISCV::BIvadd_vx_u16m2_m_vl: - case RISCV::BIvadd_vx_u32m2_m_vl: - case RISCV::BIvadd_vx_u64m2_m_vl: - case RISCV::BIvadd_vx_u8m4_m_vl: - case RISCV::BIvadd_vx_u16m4_m_vl: - case RISCV::BIvadd_vx_u32m4_m_vl: - case RISCV::BIvadd_vx_u64m4_m_vl: - case RISCV::BIvadd_vx_u8m8_m_vl: - case RISCV::BIvadd_vx_u16m8_m_vl: - case RISCV::BIvadd_vx_u32m8_m_vl: - case RISCV::BIvadd_vx_u64m8_m_vl: - case RISCV::BIvadd_vx_u8mf2_m_vl: - case RISCV::BIvadd_vx_u16mf2_m_vl: - case 
RISCV::BIvadd_vx_u32mf2_m_vl: - case RISCV::BIvadd_vx_u8mf4_m_vl: - case RISCV::BIvadd_vx_u16mf4_m_vl: - case RISCV::BIvadd_vx_u8mf8_m_vl: - ID = Intrinsic::riscv_vadd_mask; - // The order of operands is (mask, maskedoff, op1, op2, vl). - IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[4]->getType()}; - // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl). - std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); - break; +#include "clang/Basic/riscv_vector_builtin_cg.inc" } assert(ID != Intrinsic::not_intrinsic); diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -179,14 +179,18 @@ set(out_files ${out_files} PARENT_SCOPE) endfunction(copy_header_to_output_dir) -function(clang_generate_header td_option td_file out_file) - clang_tablegen(${out_file} ${td_option} +function(clang_generate_header) + # Syntax: + # clang_generate_header [tablegen-arg ...] SOURCE td_file OUTPUT output-file + cmake_parse_arguments(td "" "SOURCE;OUTPUT" "" ${ARGN}) + + clang_tablegen(${td_OUTPUT} ${td_UNPARSED_ARGUMENTS} -I ${CLANG_SOURCE_DIR}/include/clang/Basic/ - SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/${td_file}) + SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/${td_SOURCE}) - copy_header_to_output_dir(${CMAKE_CURRENT_BINARY_DIR} ${out_file}) + copy_header_to_output_dir(${CMAKE_CURRENT_BINARY_DIR} ${td_OUTPUT}) set(out_files ${out_files} PARENT_SCOPE) - list(APPEND generated_files "${CMAKE_CURRENT_BINARY_DIR}/${out_file}") + list(APPEND generated_files "${CMAKE_CURRENT_BINARY_DIR}/${td_OUTPUT}") set(generated_files ${generated_files} PARENT_SCOPE) endfunction(clang_generate_header) @@ -198,17 +202,37 @@ # Generate header files and copy them to the build directory # Generate arm_neon.h -clang_generate_header(-gen-arm-neon arm_neon.td arm_neon.h) +clang_generate_header(-gen-arm-neon + SOURCE arm_neon.td + OUTPUT arm_neon.h) # Generate arm_fp16.h -clang_generate_header(-gen-arm-fp16 arm_fp16.td arm_fp16.h) +clang_generate_header(-gen-arm-fp16 + SOURCE arm_fp16.td + OUTPUT arm_fp16.h) # Generate arm_sve.h -clang_generate_header(-gen-arm-sve-header arm_sve.td arm_sve.h) +clang_generate_header(-gen-arm-sve-header + SOURCE arm_sve.td + OUTPUT arm_sve.h) # Generate arm_bf16.h -clang_generate_header(-gen-arm-bf16 arm_bf16.td arm_bf16.h) +clang_generate_header(-gen-arm-bf16 + SOURCE arm_bf16.td + OUTPUT arm_bf16.h) # Generate arm_mve.h -clang_generate_header(-gen-arm-mve-header arm_mve.td arm_mve.h) +clang_generate_header(-gen-arm-mve-header + SOURCE arm_mve.td + OUTPUT arm_mve.h) # Generate arm_cde.h -clang_generate_header(-gen-arm-cde-header arm_cde.td arm_cde.h) +clang_generate_header(-gen-arm-cde-header + SOURCE arm_cde.td + OUTPUT arm_cde.h) +# Generate riscv_vector.h +clang_generate_header(-gen-riscv-vector-header -D=ALL + SOURCE riscv_vector.td + OUTPUT riscv_vector.h) +# Generate riscv_vector_generic.h +clang_generate_header(-gen-riscv-vector-generic-header -D=ALL + SOURCE riscv_vector.td + OUTPUT riscv_vector_generic.h) add_custom_target(clang-resource-headers ALL DEPENDS ${out_files}) set_target_properties(clang-resource-headers PROPERTIES diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vadd.c @@ -0,0 +1,2496 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py 
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D='
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_vl(vint8m1_t arg_1, vint8m1_t arg_2, size_t arg_3) {
+  return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i8> [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i8> [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, vint8m1_t arg_4, size_t arg_5) {
+  return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2_vl(vint8m2_t arg_1, vint8m2_t arg_2, size_t arg_3) {
+  return vadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m_vl(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i8> [[ARG_4:%.*]], <vscale x 16 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m_vl(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i8> [[ARG_4:%.*]], <vscale x 16 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2,
vint8m2_t arg_3, vint8m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_vl(vint8m4_t arg_1, vint8m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, vint8m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_vl(vint8m8_t arg_1, vint8m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, vint8m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_vl(vint8mf2_t arg_1, vint8mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, vint8mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_vl(vint8mf4_t arg_1, vint8mf4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, vint8mf4_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_vl(vint8mf8_t arg_1, vint8mf8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, vint8mf8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_vl(vint16m1_t arg_1, vint16m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, vint16m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_vl(vint16m2_t arg_1, vint16m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, vint16m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_vl(vint16m4_t arg_1, vint16m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, vint16m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_vl(vint16m8_t arg_1, vint16m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, vint16m8_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_vl(vint16mf2_t arg_1, vint16mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, vint16mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_vl(vint16mf4_t arg_1, vint16mf4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, vint16mf4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_vl(vint32m1_t arg_1, vint32m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, vint32m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_vl(vint32m2_t arg_1, vint32m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, vint32m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_vl(vint32m4_t arg_1, vint32m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, vint32m4_t arg_4, size_t arg_5) { +// + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_vl(vint32m8_t arg_1, vint32m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, vint32m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_vl(vint32mf2_t arg_1, vint32mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, vint32mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_vl(vint64m1_t arg_1, vint64m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, vint64m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_vl(vint64m2_t arg_1, vint64m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], 
[[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, vint64m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_vl(vint64m4_t arg_1, vint64m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, vint64m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_vl(vint64m8_t arg_1, vint64m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, vint64m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m1_t
test_vadd_vx_i8m1_vl(vint8m1_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_vl(vint8m2_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_vl(vint8m4_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, 
arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_vl(vint8m8_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_vl(vint8mf2_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_vl(vint8mf4_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]],
i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_vl(vint8mf8_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_vl(vint16m1_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_vl(vint16m2_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_vl(vint16m4_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_vl(vint16m8_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_vl(vint16mf2_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_vl(vint16mf4_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( 
[[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_vl(vint32m1_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_vl(vint32m2_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_vl(vint32m4_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret
[[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_vl(vint32m8_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_vl(vint32mf2_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_vl(vint64m1_t arg_1, int64_t arg_2, size_t arg_3) { + 
return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_vl(vint64m2_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_vl(vint64m4_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4,
arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_vl(vint64m8_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_vl(vuint8m1_t arg_1, vuint8m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, vuint8m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_vl(vuint8m2_t arg_1, vuint8m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( 
[[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, vuint8m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_vl(vuint8m4_t arg_1, vuint8m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, vuint8m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_vl(vuint8m8_t arg_1, vuint8m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, vuint8m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64
[[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_vl(vuint8mf2_t arg_1, vuint8mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, vuint8mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_vl(vuint8mf4_t arg_1, vuint8mf4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, vuint8mf4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_vl(vuint8mf8_t arg_1, vuint8mf8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m_vl( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, vuint8mf8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_vl(vuint16m1_t arg_1, vuint16m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, vuint16m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_vl(vuint16m2_t arg_1, vuint16m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, vuint16m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_vl( +// CHECK-RV32-NEXT: entry: +//
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_vl(vuint16m4_t arg_1, vuint16m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, vuint16m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_vl(vuint16m8_t arg_1, vuint16m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, vuint16m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_vl(vuint16mf2_t arg_1, vuint16mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, vuint16mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_vl(vuint16mf4_t arg_1, vuint16mf4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, vuint16mf4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_vl(vuint32m1_t arg_1, vuint32m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, vuint32m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_vl(vuint32m2_t arg_1, vuint32m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, vuint32m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_vl(vuint32m4_t arg_1, vuint32m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, vuint32m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_vl(vuint32m8_t arg_1, vuint32m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], 
[[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, vuint32m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_vl(vuint32mf2_t arg_1, vuint32mf2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, vuint32mf2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_vl(vuint64m1_t arg_1, vuint64m1_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, vuint64m1_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +//
CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_vl(vuint64m2_t arg_1, vuint64m2_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, vuint64m2_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_vl(vuint64m4_t arg_1, vuint64m4_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, vuint64m4_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_vl(vuint64m8_t arg_1, vuint64m8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t 
test_vadd_vv_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, vuint64m8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_vl(vuint8m1_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_vl(vuint8m2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_vl(vuint8m4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: 
@test_vadd_vx_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_vl(vuint8m8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_vl(vuint8mf2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_vl(vuint8mf4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_vl(vuint8mf8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_vl(vuint16m1_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_vl(vuint16m2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_vl(vuint16m4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// 
CHECK-RV32-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_vl(vuint16m8_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_vl(vuint16mf2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_vl(vuint16mf4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_vl(vuint32m1_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_vl(vuint32m2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_vl(vuint32m4_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_vl(vuint32m8_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_vl(vuint32mf2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], 
[[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_vl(vuint64m1_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_vl(vuint64m2_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+vuint64m4_t test_vadd_vx_u64m4_vl(vuint64m4_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_vl(vuint64m8_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vfadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vfadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic/vfadd.c @@ -0,0 +1,518 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t +// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t + +// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D=' + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: 
@test_vfadd_vv_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_vl(vfloat32m1_t arg_1, vfloat32m1_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, vfloat32m1_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_vl(vfloat32m2_t arg_1, vfloat32m2_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, vfloat32m2_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_vl(vfloat32m4_t arg_1, vfloat32m4_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, vfloat32m4_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_vl(vfloat32m8_t arg_1, vfloat32m8_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, vfloat32m8_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_vl(vfloat32mf2_t arg_1, vfloat32mf2_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, vfloat32mf2_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: 
@test_vfadd_vv_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_vl(vfloat64m1_t arg_1, vfloat64m1_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, vfloat64m1_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_vl(vfloat64m2_t arg_1, vfloat64m2_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, vfloat64m2_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_vl(vfloat64m4_t arg_1, vfloat64m4_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, vfloat64m4_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_vl(vfloat64m8_t arg_1, vfloat64m8_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, vfloat64m8_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_vl(vfloat32m1_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_vl( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_vl(vfloat32m2_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_vl(vfloat32m4_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_vl(vfloat32m8_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
<vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ARG_2:%.*]], <vscale x 16 x float> [[ARG_3:%.*]], float [[ARG_4:%.*]], <vscale x 16 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ARG_2:%.*]], <vscale x 16 x float> [[ARG_3:%.*]], float [[ARG_4:%.*]], <vscale x 16 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, float32_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i32(<vscale x 1 x float> [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_vl(vfloat32mf2_t arg_1, float32_t arg_2, size_t arg_3) {
+  return vfadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ARG_2:%.*]], <vscale x 1 x float> [[ARG_3:%.*]], float [[ARG_4:%.*]], <vscale x 1 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ARG_2:%.*]], <vscale x 1 x float> [[ARG_3:%.*]], float [[ARG_4:%.*]], <vscale x 1 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, float32_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i32(<vscale x 1 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_vl(vfloat64m1_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ARG_2:%.*]], <vscale x 1 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 1 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ARG_2:%.*]], <vscale x 1 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 1 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i32(<vscale x 2 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_vl(vfloat64m2_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ARG_2:%.*]], <vscale x 2 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 2 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ARG_2:%.*]], <vscale x 2 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 2 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i32(<vscale x 4 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_vl(vfloat64m4_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ARG_2:%.*]], <vscale x 4 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 4 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ARG_2:%.*]], <vscale x 4 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 4 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i32(<vscale x 8 x double> [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_vl(vfloat64m8_t arg_1, float64_t arg_2, size_t arg_3) {
+  return vfadd_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ARG_2:%.*]], <vscale x 8 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ARG_2:%.*]], <vscale x 8 x double> [[ARG_3:%.*]], double [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, float64_t arg_4, size_t arg_5) {
+  return vfadd_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vadd.c
@@ -0,0 +1,2478 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D='
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i32 [[ARG_3:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_vl(vint8m1_t arg_1, vint8m1_t arg_2, size_t arg_3) {
+  return vadd_vv_i8m1_vl(arg_1, arg_2, arg_3);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i8> [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i64 [[ARG_5:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i8> [[ARG_4:%.*]], <vscale x 8 x i1> [[ARG_1:%.*]], i32 [[ARG_5:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, vint8m1_t arg_4, size_t arg_5) {
+  return vadd_vv_i8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i64 [[ARG_3:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_vl(vint8m2_t arg_1, vint8m2_t arg_2, size_t arg_3) { + return vadd_vv_i8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, vint8m2_t arg_4, size_t arg_5) { + return vadd_vv_i8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_vl(vint8m4_t arg_1, vint8m4_t arg_2, size_t arg_3) { + return vadd_vv_i8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, vint8m4_t arg_4, size_t arg_5) { + return vadd_vv_i8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_vl(vint8m8_t arg_1, vint8m8_t arg_2, size_t arg_3) { + return vadd_vv_i8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t 
test_vadd_vv_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, vint8m8_t arg_4, size_t arg_5) { + return vadd_vv_i8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_vl(vint8mf2_t arg_1, vint8mf2_t arg_2, size_t arg_3) { + return vadd_vv_i8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, vint8mf2_t arg_4, size_t arg_5) { + return vadd_vv_i8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_vl(vint8mf4_t arg_1, vint8mf4_t arg_2, size_t arg_3) { + return vadd_vv_i8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, vint8mf4_t arg_4, size_t arg_5) { + return vadd_vv_i8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_vl(vint8mf8_t arg_1, vint8mf8_t arg_2, size_t arg_3) { + return vadd_vv_i8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m_vl( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, vint8mf8_t arg_4, size_t arg_5) { + return vadd_vv_i8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_vl(vint16m1_t arg_1, vint16m1_t arg_2, size_t arg_3) { + return vadd_vv_i16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, vint16m1_t arg_4, size_t arg_5) { + return vadd_vv_i16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_vl(vint16m2_t arg_1, vint16m2_t arg_2, size_t arg_3) { + return vadd_vv_i16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, vint16m2_t arg_4, size_t arg_5) { + return vadd_vv_i16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_vl(vint16m4_t arg_1, vint16m4_t arg_2, size_t arg_3) { + return vadd_vv_i16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, vint16m4_t arg_4, size_t arg_5) { + return vadd_vv_i16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_vl(vint16m8_t arg_1, vint16m8_t arg_2, size_t arg_3) { + return vadd_vv_i16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, vint16m8_t arg_4, size_t arg_5) { + return vadd_vv_i16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_vl(vint16mf2_t arg_1, vint16mf2_t arg_2, size_t arg_3) { + return vadd_vv_i16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: 
@test_vadd_vv_i16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, vint16mf2_t arg_4, size_t arg_5) { + return vadd_vv_i16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_vl(vint16mf4_t arg_1, vint16mf4_t arg_2, size_t arg_3) { + return vadd_vv_i16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, vint16mf4_t arg_4, size_t arg_5) { + return vadd_vv_i16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_vl(vint32m1_t arg_1, vint32m1_t arg_2, size_t arg_3) { + return vadd_vv_i32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, vint32m1_t arg_4, size_t arg_5) { + return vadd_vv_i32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_vl(vint32m2_t arg_1, vint32m2_t arg_2, size_t arg_3) { + return vadd_vv_i32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, vint32m2_t arg_4, size_t arg_5) { + return vadd_vv_i32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_vl(vint32m4_t arg_1, vint32m4_t arg_2, size_t arg_3) { + return vadd_vv_i32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, vint32m4_t arg_4, size_t arg_5) { + return vadd_vv_i32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_vl(vint32m8_t arg_1, vint32m8_t arg_2, size_t arg_3) { + return vadd_vv_i32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+vint32m8_t test_vadd_vv_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, vint32m8_t arg_4, size_t arg_5) { + return vadd_vv_i32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_vl(vint32mf2_t arg_1, vint32mf2_t arg_2, size_t arg_3) { + return vadd_vv_i32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, vint32mf2_t arg_4, size_t arg_5) { + return vadd_vv_i32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_vl(vint64m1_t arg_1, vint64m1_t arg_2, size_t arg_3) { + return vadd_vv_i64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, vint64m1_t arg_4, size_t arg_5) { + return vadd_vv_i64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_vl(vint64m2_t arg_1, vint64m2_t arg_2, size_t arg_3) { + return vadd_vv_i64m2_vl(arg_1, arg_2, arg_3); +} + 
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, vint64m2_t arg_4, size_t arg_5) { + return vadd_vv_i64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_vl(vint64m4_t arg_1, vint64m4_t arg_2, size_t arg_3) { + return vadd_vv_i64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, vint64m4_t arg_4, size_t arg_5) { + return vadd_vv_i64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_vl(vint64m8_t arg_1, vint64m8_t arg_2, size_t arg_3) { + return vadd_vv_i64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, vint64m8_t arg_4, size_t arg_5) { + return vadd_vv_i64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_vl( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_vl(vint8m1_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_m_vl(vbool8_t arg_1, vint8m1_t arg_2, vint8m1_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_vl(vint8m2_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_m_vl(vbool4_t arg_1, vint8m2_t arg_2, vint8m2_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_vl(vint8m4_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_m_vl(vbool2_t arg_1, vint8m4_t arg_2, vint8m4_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_vl(vint8m8_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_m_vl(vbool1_t arg_1, vint8m8_t arg_2, vint8m8_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_vl(vint8mf2_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_m_vl(vbool16_t arg_1, vint8mf2_t arg_2, vint8mf2_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t 
test_vadd_vx_i8mf4_vl(vint8mf4_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_m_vl(vbool32_t arg_1, vint8mf4_t arg_2, vint8mf4_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_vl(vint8mf8_t arg_1, int8_t arg_2, size_t arg_3) { + return vadd_vx_i8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_m_vl(vbool64_t arg_1, vint8mf8_t arg_2, vint8mf8_t arg_3, int8_t arg_4, size_t arg_5) { + return vadd_vx_i8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_vl(vint16m1_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_m_vl(vbool16_t arg_1, vint16m1_t arg_2, vint16m1_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m1_m_vl(arg_1, arg_2, arg_3, arg_4, 
arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_vl(vint16m2_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_m_vl(vbool8_t arg_1, vint16m2_t arg_2, vint16m2_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_vl(vint16m4_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_m_vl(vbool4_t arg_1, vint16m4_t arg_2, vint16m4_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_vl(vint16m8_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 
[[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_m_vl(vbool2_t arg_1, vint16m8_t arg_2, vint16m8_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_vl(vint16mf2_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_m_vl(vbool32_t arg_1, vint16mf2_t arg_2, vint16mf2_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_vl(vint16mf4_t arg_1, int16_t arg_2, size_t arg_3) { + return vadd_vx_i16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_m_vl(vbool64_t arg_1, vint16mf4_t arg_2, vint16mf4_t arg_3, int16_t arg_4, size_t arg_5) { + return vadd_vx_i16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// 
CHECK-RV32-LABEL: @test_vadd_vx_i32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_vl(vint32m1_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_m_vl(vbool32_t arg_1, vint32m1_t arg_2, vint32m1_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_vl(vint32m2_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_m_vl(vbool16_t arg_1, vint32m2_t arg_2, vint32m2_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_vl(vint32m4_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], 
i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_m_vl(vbool8_t arg_1, vint32m4_t arg_2, vint32m4_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_vl(vint32m8_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_m_vl(vbool4_t arg_1, vint32m8_t arg_2, vint32m8_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_vl(vint32mf2_t arg_1, int32_t arg_2, size_t arg_3) { + return vadd_vx_i32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_m_vl(vbool64_t arg_1, vint32mf2_t arg_2, vint32mf2_t arg_3, int32_t arg_4, size_t arg_5) { + return vadd_vx_i32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_vl(vint64m1_t arg_1, int64_t 
arg_2, size_t arg_3) { + return vadd_vx_i64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_m_vl(vbool64_t arg_1, vint64m1_t arg_2, vint64m1_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_vl(vint64m2_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_m_vl(vbool32_t arg_1, vint64m2_t arg_2, vint64m2_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_vl(vint64m4_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_m_vl(vbool16_t arg_1, vint64m4_t arg_2, vint64m4_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// 
CHECK-RV64-LABEL: @test_vadd_vx_i64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_vl(vint64m8_t arg_1, int64_t arg_2, size_t arg_3) { + return vadd_vx_i64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_m_vl(vbool8_t arg_1, vint64m8_t arg_2, vint64m8_t arg_3, int64_t arg_4, size_t arg_5) { + return vadd_vx_i64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_vl(vuint8m1_t arg_1, vuint8m1_t arg_2, size_t arg_3) { + return vadd_vv_u8m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, vuint8m1_t arg_4, size_t arg_5) { + return vadd_vv_u8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_vl(vuint8m2_t arg_1, vuint8m2_t arg_2, size_t arg_3) { + return vadd_vv_u8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, vuint8m2_t arg_4, size_t arg_5) { + return vadd_vv_u8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_vl(vuint8m4_t arg_1, vuint8m4_t arg_2, size_t arg_3) { + return vadd_vv_u8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, vuint8m4_t arg_4, size_t arg_5) { + return vadd_vv_u8m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_vl(vuint8m8_t arg_1, vuint8m8_t arg_2, size_t arg_3) { + return vadd_vv_u8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, vuint8m8_t arg_4, size_t arg_5) { + return vadd_vv_u8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_vl(vuint8mf2_t arg_1, vuint8mf2_t arg_2, size_t arg_3) { + return vadd_vv_u8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, vuint8mf2_t arg_4, size_t arg_5) { + return vadd_vv_u8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_vl(vuint8mf4_t arg_1, vuint8mf4_t arg_2, size_t arg_3) { + return vadd_vv_u8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, vuint8mf4_t arg_4, size_t arg_5) { + return vadd_vv_u8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_vl(vuint8mf8_t arg_1, vuint8mf8_t arg_2, size_t arg_3) { + return vadd_vv_u8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, vuint8mf8_t arg_4, size_t arg_5) { + return vadd_vv_u8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_vl(vuint16m1_t arg_1, vuint16m1_t arg_2, size_t arg_3) { + return vadd_vv_u16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, vuint16m1_t arg_4, size_t arg_5) { + return vadd_vv_u16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_vl(vuint16m2_t arg_1, vuint16m2_t arg_2, size_t arg_3) { + return vadd_vv_u16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, vuint16m2_t arg_4, size_t arg_5) { + return vadd_vv_u16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_vl(vuint16m4_t arg_1, vuint16m4_t arg_2, size_t arg_3) { + return 
vadd_vv_u16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, vuint16m4_t arg_4, size_t arg_5) { + return vadd_vv_u16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_vl(vuint16m8_t arg_1, vuint16m8_t arg_2, size_t arg_3) { + return vadd_vv_u16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, vuint16m8_t arg_4, size_t arg_5) { + return vadd_vv_u16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_vl(vuint16mf2_t arg_1, vuint16mf2_t arg_2, size_t arg_3) { + return vadd_vv_u16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, vuint16mf2_t arg_4, size_t arg_5) { + return vadd_vv_u16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, 
arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_vl(vuint16mf4_t arg_1, vuint16mf4_t arg_2, size_t arg_3) { + return vadd_vv_u16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, vuint16mf4_t arg_4, size_t arg_5) { + return vadd_vv_u16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_vl(vuint32m1_t arg_1, vuint32m1_t arg_2, size_t arg_3) { + return vadd_vv_u32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, vuint32m1_t arg_4, size_t arg_5) { + return vadd_vv_u32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_vl(vuint32m2_t arg_1, vuint32m2_t arg_2, size_t arg_3) { + return vadd_vv_u32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( 
[[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, vuint32m2_t arg_4, size_t arg_5) { + return vadd_vv_u32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_vl(vuint32m4_t arg_1, vuint32m4_t arg_2, size_t arg_3) { + return vadd_vv_u32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, vuint32m4_t arg_4, size_t arg_5) { + return vadd_vv_u32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_vl(vuint32m8_t arg_1, vuint32m8_t arg_2, size_t arg_3) { + return vadd_vv_u32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, vuint32m8_t arg_4, size_t arg_5) { + return vadd_vv_u32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_vl(vuint32mf2_t arg_1, vuint32mf2_t arg_2, size_t arg_3) { + return vadd_vv_u32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, vuint32mf2_t arg_4, size_t arg_5) { + return vadd_vv_u32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_vl(vuint64m1_t arg_1, vuint64m1_t arg_2, size_t arg_3) { + return vadd_vv_u64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, vuint64m1_t arg_4, size_t arg_5) { + return vadd_vv_u64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_vl(vuint64m2_t arg_1, vuint64m2_t arg_2, size_t arg_3) { + return vadd_vv_u64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, vuint64m2_t arg_4, size_t arg_5) { + return vadd_vv_u64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_vl(vuint64m4_t arg_1, vuint64m4_t arg_2, size_t arg_3) { + return vadd_vv_u64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, vuint64m4_t arg_4, size_t arg_5) { + return vadd_vv_u64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_vl(vuint64m8_t arg_1, vuint64m8_t arg_2, size_t arg_3) { + return vadd_vv_u64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, vuint64m8_t arg_4, size_t arg_5) { + return vadd_vv_u64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_vl(vuint8m1_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_m_vl(vbool8_t arg_1, vuint8m1_t arg_2, vuint8m1_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_vl(vuint8m2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_m_vl(vbool4_t arg_1, vuint8m2_t arg_2, vuint8m2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_vl(vuint8m4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_m_vl(vbool2_t arg_1, vuint8m4_t arg_2, vuint8m4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m4_m_vl(arg_1, 
arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_vl(vuint8m8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_m_vl(vbool1_t arg_1, vuint8m8_t arg_2, vuint8m8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_vl(vuint8mf2_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_m_vl(vbool16_t arg_1, vuint8mf2_t arg_2, vuint8mf2_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_vl(vuint8mf4_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], 
[[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_m_vl(vbool32_t arg_1, vuint8mf4_t arg_2, vuint8mf4_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_1:%.*]], i8 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_vl(vuint8mf8_t arg_1, uint8_t arg_2, size_t arg_3) { + return vadd_vx_u8mf8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i8 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_m_vl(vbool64_t arg_1, vuint8mf8_t arg_2, vuint8mf8_t arg_3, uint8_t arg_4, size_t arg_5) { + return vadd_vx_u8mf8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_vl(vuint16m1_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_m_vl(vbool16_t arg_1, vuint16m1_t arg_2, vuint16m1_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_vl( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_vl(vuint16m2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_m_vl(vbool8_t arg_1, vuint16m2_t arg_2, vuint16m2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_vl(vuint16m4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_m_vl(vbool4_t arg_1, vuint16m4_t arg_2, vuint16m4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_vl(vuint16m8_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], 
[[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_m_vl(vbool2_t arg_1, vuint16m8_t arg_2, vuint16m8_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_vl(vuint16mf2_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_m_vl(vbool32_t arg_1, vuint16mf2_t arg_2, vuint16mf2_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_1:%.*]], i16 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_vl(vuint16mf4_t arg_1, uint16_t arg_2, size_t arg_3) { + return vadd_vx_u16mf4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i16 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_m_vl(vbool64_t arg_1, vuint16mf4_t arg_2, vuint16mf4_t arg_3, uint16_t arg_4, size_t arg_5) { + return vadd_vx_u16mf4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_vl(vuint32m1_t 
arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_m_vl(vbool32_t arg_1, vuint32m1_t arg_2, vuint32m1_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_vl(vuint32m2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_m_vl(vbool16_t arg_1, vuint32m2_t arg_2, vuint32m2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_vl(vuint32m4_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_m_vl(vbool8_t arg_1, vuint32m4_t arg_2, vuint32m4_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m4_m_vl(arg_1, arg_2, arg_3, 
arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_vl(vuint32m8_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_m_vl(vbool4_t arg_1, vuint32m8_t arg_2, vuint32m8_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_1:%.*]], i32 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_vl(vuint32mf2_t arg_1, uint32_t arg_2, size_t arg_3) { + return vadd_vx_u32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i32 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_m_vl(vbool64_t arg_1, vuint32mf2_t arg_2, vuint32mf2_t arg_3, uint32_t arg_4, size_t arg_5) { + return vadd_vx_u32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_vl(vuint64m1_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vx_u64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( 
[[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_m_vl(vbool64_t arg_1, vuint64m1_t arg_2, vuint64m1_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_vl(vuint64m2_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vx_u64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_m_vl(vbool32_t arg_1, vuint64m2_t arg_2, vuint64m2_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_vl(vuint64m4_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vx_u64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_m_vl(vbool16_t arg_1, vuint64m4_t arg_2, vuint64m4_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_vl(vuint64m8_t arg_1, uint64_t arg_2, size_t arg_3) { + return vadd_vx_u64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_m_vl(vbool8_t arg_1, vuint64m8_t arg_2, vuint64m8_t arg_3, uint64_t arg_4, size_t arg_5) { + return vadd_vx_u64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + diff --git a/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vfadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics/vfadd.c @@ -0,0 +1,518 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t +// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t + +// NOTE: This file is autogenerated by 'bin/clang-tblgen -gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td -D=' + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_vl(vfloat32m1_t arg_1, vfloat32m1_t arg_2, size_t arg_3) { + return vfadd_vv_f32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +//
+vfloat32m1_t test_vfadd_vv_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, vfloat32m1_t arg_4, size_t arg_5) { + return vfadd_vv_f32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_vl(vfloat32m2_t arg_1, vfloat32m2_t arg_2, size_t arg_3) { + return vfadd_vv_f32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, vfloat32m2_t arg_4, size_t arg_5) { + return vfadd_vv_f32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_vl(vfloat32m4_t arg_1, vfloat32m4_t arg_2, size_t arg_3) { + return vfadd_vv_f32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, vfloat32m4_t arg_4, size_t arg_5) { + return vfadd_vv_f32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_vl(vfloat32m8_t arg_1, vfloat32m8_t arg_2, size_t 
arg_3) { + return vfadd_vv_f32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, vfloat32m8_t arg_4, size_t arg_5) { + return vfadd_vv_f32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_vl(vfloat32mf2_t arg_1, vfloat32mf2_t arg_2, size_t arg_3) { + return vfadd_vv_f32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, vfloat32mf2_t arg_4, size_t arg_5) { + return vfadd_vv_f32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_vl(vfloat64m1_t arg_1, vfloat64m1_t arg_2, size_t arg_3) { + return vfadd_vv_f64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, vfloat64m1_t arg_4, size_t arg_5) { + 
return vfadd_vv_f64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_vl(vfloat64m2_t arg_1, vfloat64m2_t arg_2, size_t arg_3) { + return vfadd_vv_f64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, vfloat64m2_t arg_4, size_t arg_5) { + return vfadd_vv_f64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_vl(vfloat64m4_t arg_1, vfloat64m4_t arg_2, size_t arg_3) { + return vfadd_vv_f64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, vfloat64m4_t arg_4, size_t arg_5) { + return vfadd_vv_f64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_vl(vfloat64m8_t arg_1, vfloat64m8_t arg_2, size_t arg_3) { + return vfadd_vv_f64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, vfloat64m8_t arg_4, size_t arg_5) { + return vfadd_vv_f64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_vl(vfloat32m1_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m_vl(vbool32_t arg_1, vfloat32m1_t arg_2, vfloat32m1_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_vl(vfloat32m2_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m_vl(vbool16_t arg_1, vfloat32m2_t arg_2, vfloat32m2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_vl(vfloat32m4_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m_vl(vbool8_t arg_1, vfloat32m4_t arg_2, vfloat32m4_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_vl(vfloat32m8_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m_vl(vbool4_t arg_1, vfloat32m8_t arg_2, vfloat32m8_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[ARG_1:%.*]], float [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i32( [[ARG_1:%.*]], float [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_vl(vfloat32mf2_t arg_1, float32_t arg_2, size_t arg_3) { + return vfadd_vf_f32mf2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], float 
[[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], float [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m_vl(vbool64_t arg_1, vfloat32mf2_t arg_2, vfloat32mf2_t arg_3, float32_t arg_4, size_t arg_5) { + return vfadd_vf_f32mf2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_vl(vfloat64m1_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vf_f64m1_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m_vl(vbool64_t arg_1, vfloat64m1_t arg_2, vfloat64m1_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_vf_f64m1_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_vl(vfloat64m2_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vf_f64m2_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m_vl(vbool32_t arg_1, vfloat64m2_t arg_2, vfloat64m2_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_vf_f64m2_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], 
i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_vl(vfloat64m4_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vf_f64m4_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_m_vl(vbool16_t arg_1, vfloat64m4_t arg_2, vfloat64m4_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_vf_f64m4_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[ARG_1:%.*]], double [[ARG_2:%.*]], i64 [[ARG_3:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i32( [[ARG_1:%.*]], double [[ARG_2:%.*]], i32 [[ARG_3:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_vl(vfloat64m8_t arg_1, float64_t arg_2, size_t arg_3) { + return vfadd_vf_f64m8_vl(arg_1, arg_2, arg_3); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m_vl( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i64 [[ARG_5:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m_vl( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i32( [[ARG_2:%.*]], [[ARG_3:%.*]], double [[ARG_4:%.*]], [[ARG_1:%.*]], i32 [[ARG_5:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_m_vl(vbool8_t arg_1, vfloat64m8_t arg_2, vfloat64m8_t arg_3, float64_t arg_4, size_t arg_5) { + return vfadd_vf_f64m8_m_vl(arg_1, arg_2, arg_3, arg_4, arg_5); +} + diff --git a/clang/test/CodeGen/RISCV/vadd.c b/clang/test/CodeGen/RISCV/vadd.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/vadd.c +++ /dev/null @@ -1,2648 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py -// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \ -// RUN: -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV64-O2 %s -// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v \ -// RUN: -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV32-O2 %s - -#include <stddef.h> -#include <stdint.h> - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail
call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vv_i8m1_vl(__rvv_int8m1_t arg_0, __rvv_int8m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m1_t test_vadd_vv_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, __rvv_int8m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vv_i16m1_vl(__rvv_int16m1_t arg_0, __rvv_int16m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m1_t test_vadd_vv_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, __rvv_int16m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vv_i32m1_vl(__rvv_int32m1_t arg_0, __rvv_int32m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// 
CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m1_t test_vadd_vv_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, __rvv_int32m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vv_i64m1_vl(__rvv_int64m1_t arg_0, __rvv_int64m1_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m1_t test_vadd_vv_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, __rvv_int64m1_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vv_i8m2_vl(__rvv_int8m2_t arg_0, __rvv_int8m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m2_t test_vadd_vv_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, __rvv_int8m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - 
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vv_i16m2_vl(__rvv_int16m2_t arg_0, __rvv_int16m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m2_t test_vadd_vv_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, __rvv_int16m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vv_i32m2_vl(__rvv_int32m2_t arg_0, __rvv_int32m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m2_t test_vadd_vv_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, __rvv_int32m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t 
test_vadd_vv_i64m2_vl(__rvv_int64m2_t arg_0, __rvv_int64m2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m2_t test_vadd_vv_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, __rvv_int64m2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vv_i8m4_vl(__rvv_int8m4_t arg_0, __rvv_int8m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m4_t test_vadd_vv_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, __rvv_int8m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vv_i16m4_vl(__rvv_int16m4_t arg_0, __rvv_int16m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: 
[[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m4_t test_vadd_vv_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, __rvv_int16m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vv_i32m4_vl(__rvv_int32m4_t arg_0, __rvv_int32m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m4_t test_vadd_vv_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, __rvv_int32m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vv_i64m4_vl(__rvv_int64m4_t arg_0, __rvv_int64m4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m4_t test_vadd_vv_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, __rvv_int64m4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vv_i8m8_vl(__rvv_int8m8_t arg_0, __rvv_int8m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8m8_t test_vadd_vv_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, __rvv_int8m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vv_i16m8_vl(__rvv_int16m8_t arg_0, __rvv_int16m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16m8_t test_vadd_vv_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, __rvv_int16m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vv_i32m8_vl(__rvv_int32m8_t arg_0, __rvv_int32m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32m8_vl(arg_0, arg_1, arg_2); -} 
- -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32m8_t test_vadd_vv_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, __rvv_int32m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vv_i64m8_vl(__rvv_int64m8_t arg_0, __rvv_int64m8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int64m8_t test_vadd_vv_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, __rvv_int64m8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vv_i8mf2_vl(__rvv_int8mf2_t arg_0, __rvv_int8mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// 
CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf2_t test_vadd_vv_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, __rvv_int8mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vv_i16mf2_vl(__rvv_int16mf2_t arg_0, __rvv_int16mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf2_t test_vadd_vv_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, __rvv_int16mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vv_i32mf2_vl(__rvv_int32mf2_t arg_0, __rvv_int32mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int32mf2_t test_vadd_vv_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, __rvv_int32mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret 
[[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vv_i8mf4_vl(__rvv_int8mf4_t arg_0, __rvv_int8mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf4_t test_vadd_vv_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, __rvv_int8mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vv_i16mf4_vl(__rvv_int16mf4_t arg_0, __rvv_int16mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int16mf4_t test_vadd_vv_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, __rvv_int16mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_int8mf8_t test_vadd_vv_i8mf8_vl(__rvv_int8mf8_t arg_0, __rvv_int8mf8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_i8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// 
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vv_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, __rvv_int8mf8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vx_i8m1_vl(__rvv_int8m1_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vx_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vx_i16m1_vl(__rvv_int16m1_t arg_0, int16_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vx_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, int16_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m1_t test_vadd_vx_i32m1_vl(__rvv_int32m1_t arg_0, int32_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i32m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m1_t test_vadd_vx_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, int32_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m1_t test_vadd_vx_i64m1_vl(__rvv_int64m1_t arg_0, int64_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i64m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m1_t test_vadd_vx_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, int64_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m2_t test_vadd_vx_i8m2_vl(__rvv_int8m2_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m2_t test_vadd_vx_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m2_t test_vadd_vx_i16m2_vl(__rvv_int16m2_t arg_0, int16_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i16m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m2_t test_vadd_vx_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, int16_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m2_t test_vadd_vx_i32m2_vl(__rvv_int32m2_t arg_0, int32_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i32m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m2_t test_vadd_vx_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, int32_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m2_t test_vadd_vx_i64m2_vl(__rvv_int64m2_t arg_0, int64_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i64m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m2_t test_vadd_vx_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, int64_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m4_t test_vadd_vx_i8m4_vl(__rvv_int8m4_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m4_t test_vadd_vx_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m4_t test_vadd_vx_i16m4_vl(__rvv_int16m4_t arg_0, int16_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i16m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m4_t test_vadd_vx_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, int16_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m4_t test_vadd_vx_i32m4_vl(__rvv_int32m4_t arg_0, int32_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i32m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m4_t test_vadd_vx_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, int32_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m4_t test_vadd_vx_i64m4_vl(__rvv_int64m4_t arg_0, int64_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i64m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m4_t test_vadd_vx_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, int64_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m8_t test_vadd_vx_i8m8_vl(__rvv_int8m8_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8m8_t test_vadd_vx_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m8_t test_vadd_vx_i16m8_vl(__rvv_int16m8_t arg_0, int16_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i16m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16m8_t test_vadd_vx_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, int16_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m8_t test_vadd_vx_i32m8_vl(__rvv_int32m8_t arg_0, int32_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i32m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32m8_t test_vadd_vx_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, int32_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m8_t test_vadd_vx_i64m8_vl(__rvv_int64m8_t arg_0, int64_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i64m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int64m8_t test_vadd_vx_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, int64_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf2_t test_vadd_vx_i8mf2_vl(__rvv_int8mf2_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf2_t test_vadd_vx_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16mf2_t test_vadd_vx_i16mf2_vl(__rvv_int16mf2_t arg_0, int16_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i16mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16mf2_t test_vadd_vx_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, int16_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32mf2_t test_vadd_vx_i32mf2_vl(__rvv_int32mf2_t arg_0, int32_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i32mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int32mf2_t test_vadd_vx_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, int32_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf4_t test_vadd_vx_i8mf4_vl(__rvv_int8mf4_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf4_t test_vadd_vx_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16mf4_t test_vadd_vx_i16mf4_vl(__rvv_int16mf4_t arg_0, int16_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i16mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int16mf4_t test_vadd_vx_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, int16_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vx_i8mf8_vl(__rvv_int8mf8_t arg_0, int8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vx_i8mf8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vx_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, int8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vx_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m1_t test_vadd_vv_u8m1_vl(__rvv_uint8m1_t arg_0, __rvv_uint8m1_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m1_t test_vadd_vv_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, __rvv_uint8m1_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m1_t test_vadd_vv_u16m1_vl(__rvv_uint16m1_t arg_0, __rvv_uint16m1_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m1_t test_vadd_vv_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, __rvv_uint16m1_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m1_t test_vadd_vv_u32m1_vl(__rvv_uint32m1_t arg_0, __rvv_uint32m1_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u32m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m1_t test_vadd_vv_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, __rvv_uint32m1_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m1_t test_vadd_vv_u64m1_vl(__rvv_uint64m1_t arg_0, __rvv_uint64m1_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u64m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m1_t test_vadd_vv_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, __rvv_uint64m1_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m2_t test_vadd_vv_u8m2_vl(__rvv_uint8m2_t arg_0, __rvv_uint8m2_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u8m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m2_t test_vadd_vv_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, __rvv_uint8m2_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m2_t test_vadd_vv_u16m2_vl(__rvv_uint16m2_t arg_0, __rvv_uint16m2_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u16m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m2_t test_vadd_vv_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, __rvv_uint16m2_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m2_t test_vadd_vv_u32m2_vl(__rvv_uint32m2_t arg_0, __rvv_uint32m2_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u32m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m2_t test_vadd_vv_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, __rvv_uint32m2_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m2_t test_vadd_vv_u64m2_vl(__rvv_uint64m2_t arg_0, __rvv_uint64m2_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u64m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m2_t test_vadd_vv_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, __rvv_uint64m2_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m4_t test_vadd_vv_u8m4_vl(__rvv_uint8m4_t arg_0, __rvv_uint8m4_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u8m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m4_t test_vadd_vv_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, __rvv_uint8m4_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m4_t test_vadd_vv_u16m4_vl(__rvv_uint16m4_t arg_0, __rvv_uint16m4_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u16m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m4_t test_vadd_vv_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, __rvv_uint16m4_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m4_t test_vadd_vv_u32m4_vl(__rvv_uint32m4_t arg_0, __rvv_uint32m4_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u32m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m4_t test_vadd_vv_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, __rvv_uint32m4_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m4_t test_vadd_vv_u64m4_vl(__rvv_uint64m4_t arg_0, __rvv_uint64m4_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u64m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m4_t test_vadd_vv_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, __rvv_uint64m4_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m8_t test_vadd_vv_u8m8_vl(__rvv_uint8m8_t arg_0, __rvv_uint8m8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u8m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8m8_t test_vadd_vv_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, __rvv_uint8m8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m8_t test_vadd_vv_u16m8_vl(__rvv_uint16m8_t arg_0, __rvv_uint16m8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u16m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16m8_t test_vadd_vv_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, __rvv_uint16m8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m8_t test_vadd_vv_u32m8_vl(__rvv_uint32m8_t arg_0, __rvv_uint32m8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u32m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint32m8_t test_vadd_vv_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, __rvv_uint32m8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m8_t test_vadd_vv_u64m8_vl(__rvv_uint64m8_t arg_0, __rvv_uint64m8_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u64m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint64m8_t test_vadd_vv_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, __rvv_uint64m8_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8mf2_t test_vadd_vv_u8mf2_vl(__rvv_uint8mf2_t arg_0, __rvv_uint8mf2_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u8mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint8mf2_t test_vadd_vv_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, __rvv_uint8mf2_t arg_3, size_t arg_4)
-{
- return __builtin_rvv_vadd_vv_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
-__rvv_uint16mf2_t test_vadd_vv_u16mf2_vl(__rvv_uint16mf2_t arg_0, __rvv_uint16mf2_t arg_1, size_t arg_2)
-{
- return __builtin_rvv_vadd_vv_u16mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_m_vl(
-// CHECK-RV64-O2-NEXT: entry:
-// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT: ret [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_m_vl(
-// CHECK-RV32-O2-NEXT: entry:
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT: ret [[TMP0]]
-//
test_vadd_vv_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, __rvv_uint16mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vv_u32mf2_vl(__rvv_uint32mf2_t arg_0, __rvv_uint32mf2_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vv_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, __rvv_uint32mf2_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vv_u8mf4_vl(__rvv_uint8mf4_t arg_0, __rvv_uint8mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vv_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, __rvv_uint8mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: 
@test_vadd_vv_u16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vv_u16mf4_vl(__rvv_uint16mf4_t arg_0, __rvv_uint16mf4_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vv_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, __rvv_uint16mf4_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[ARG_0:%.*]], [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[ARG_0:%.*]], [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vv_u8mf8_vl(__rvv_uint8mf8_t arg_0, __rvv_uint8mf8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vv_u8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vv_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, __rvv_uint8mf8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vv_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vx_u8m1_vl(__rvv_uint8m1_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m1_t test_vadd_vx_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vx_u16m1_vl(__rvv_uint16m1_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m1_t test_vadd_vx_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vx_u32m1_vl(__rvv_uint32m1_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m1_t test_vadd_vx_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, uint32_t 
arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vx_u64m1_vl(__rvv_uint64m1_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m1_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m1_t test_vadd_vx_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vx_u8m2_vl(__rvv_uint8m2_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m2_t test_vadd_vx_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// 
CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vx_u16m2_vl(__rvv_uint16m2_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m2_t test_vadd_vx_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vx_u32m2_vl(__rvv_uint32m2_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m2_t test_vadd_vx_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vx_u64m2_vl(__rvv_uint64m2_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_m_vl( -// 
CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m2_t test_vadd_vx_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vx_u8m4_vl(__rvv_uint8m4_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m4_t test_vadd_vx_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vx_u16m4_vl(__rvv_uint16m4_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m4_t test_vadd_vx_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = 
tail call @llvm.riscv.vadd.nxv8i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vx_u32m4_vl(__rvv_uint32m4_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m4_t test_vadd_vx_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vx_u64m4_vl(__rvv_uint64m4_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m4_t test_vadd_vx_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv64i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vx_u8m8_vl(__rvv_uint8m8_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8m8_vl(arg_0, arg_1, arg_2); -} - -// 
CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8m8_t test_vadd_vx_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv32i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vx_u16m8_vl(__rvv_uint16m8_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16m8_t test_vadd_vx_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv16i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32m8_t test_vadd_vx_u32m8_vl(__rvv_uint32m8_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret 
[[TMP0]] -// -__rvv_uint32m8_t test_vadd_vx_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i64( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv8i64.i64.i32( [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vx_u64m8_vl(__rvv_uint64m8_t arg_0, uint64_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u64m8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i64 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint64m8_t test_vadd_vx_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, uint64_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv4i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vx_u8mf2_vl(__rvv_uint8mf2_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf2_t test_vadd_vx_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_vl( -// CHECK-RV32-O2-NEXT: entry: 
-// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vx_u16mf2_vl(__rvv_uint16mf2_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf2_t test_vadd_vx_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i64( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i32.i32.i32( [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vx_u32mf2_vl(__rvv_uint32mf2_t arg_0, uint32_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u32mf2_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i32 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint32mf2_t test_vadd_vx_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, uint32_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv2i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vx_u8mf4_vl(__rvv_uint8mf4_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 
[[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf4_t test_vadd_vx_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i64( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i16.i16.i32( [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vx_u16mf4_vl(__rvv_uint16mf4_t arg_0, uint16_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u16mf4_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i16 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint16mf4_t test_vadd_vx_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, uint16_t arg_3, size_t arg_4) -{ - return __builtin_rvv_vadd_vx_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i64( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.nxv1i8.i8.i32( [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vx_u8mf8_vl(__rvv_uint8mf8_t arg_0, uint8_t arg_1, size_t arg_2) -{ - return __builtin_rvv_vadd_vx_u8mf8_vl(arg_0, arg_1, arg_2); -} - -// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_m_vl( -// CHECK-RV64-O2-NEXT: entry: -// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-RV64-O2-NEXT: ret [[TMP0]] -// -// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_m_vl( -// CHECK-RV32-O2-NEXT: entry: -// CHECK-RV32-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[ARG_1:%.*]], [[ARG_2:%.*]], i8 [[ARG_3:%.*]], [[ARG_0:%.*]], i32 [[ARG_4:%.*]]) -// CHECK-RV32-O2-NEXT: ret [[TMP0]] -// -__rvv_uint8mf8_t test_vadd_vx_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, uint8_t arg_3, size_t arg_4) -{ - return 
__builtin_rvv_vadd_vx_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
diff --git a/clang/test/Headers/riscv-vector-header.c b/clang/test/Headers/riscv-vector-header.c
new file mode 100644
--- /dev/null
+++ b/clang/test/Headers/riscv-vector-header.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple riscv64 -fsyntax-only \
+// RUN:   -target-feature +m -target-feature +a -target-feature +f \
+// RUN:   -target-feature +d -target-feature +experimental-v %s
+// expected-no-diagnostics
+
+#include <riscv_vector.h>
diff --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt
--- a/clang/utils/TableGen/CMakeLists.txt
+++ b/clang/utils/TableGen/CMakeLists.txt
@@ -19,6 +19,7 @@
   MveEmitter.cpp
   NeonEmitter.cpp
   SveEmitter.cpp
+  RISCVVEmitter.cpp
   TableGen.cpp
   )
 set_target_properties(clang-tblgen PROPERTIES FOLDER "Clang tablegenning")
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
new file mode 100644
--- /dev/null
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -0,0 +1,1142 @@
+//===- RISCVVEmitter.cpp - Generate riscv_vector.h for use with clang -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting riscv_vector.h and
+// riscv_vector_generic.h, which include a declaration and a definition of each
+// intrinsic function specified in https://github.com/riscv/rvv-intrinsic-doc.
+//
+// See also the documentation in include/clang/Basic/riscv_vector.td.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/TableGen/Record.h"
+#include <numeric>
+
+using namespace llvm;
+using BasicType = char;
+using VScaleVal = Optional<unsigned>;
+
+namespace {
+
+class LMULType {
+private:
+  float LMul;
+
+public:
+  LMULType() = delete;
+  explicit LMULType(int LMUL);
+  explicit LMULType(float LMUL) : LMul(LMUL) {}
+  inline float getValue() const { return LMul; }
+  // Return the C/C++ string representation of LMUL.
+  std::string str() const;
+  LMULType &operator*=(unsigned RHS);
+};
+
+// This class is a compact representation of a valid or invalid RVVType.
+class RVVType {
+  BasicType BT;
+  LMULType LMUL;
+  bool Float, Bool, Signed;
+  // Constant indices are "int", but they must be constant expressions.
+  bool Immediate;
+  bool Void;
+  // const qualifier.
+  bool Constant;
+  bool Pointer;
+  bool SIZE_T, PtrDiff_T;
+  unsigned ElementBitwidth;
+  VScaleVal Vscale;
+  bool Valid;
+
+  std::string BuiltinStr;
+  std::string ClangBuiltinStr;
+  std::string Str;
+  std::string ShortStr;
+
+public:
+  RVVType(BasicType BT, int LMUL, StringRef prototype);
+
+  // Return the string representation of a type, which is an encoded string for
+  // passing to the BUILTIN() macro in Builtins.def.
+  std::string builtin_str() const { return BuiltinStr; }
+
+  // Return the clang builtin type for an RVV vector type, which is used in the
+  // riscv_vector.h header file.
+  std::string clang_builtin_str() const { return ClangBuiltinStr; }
+
+  // Return the C/C++ string representation of a type for use in the
+  // riscv_vector.h header file.
+  std::string str() const { return Str; }
+
+  // Return the short name of a type for the C/C++ name suffix.
+  std::string short_str() const { return ShortStr; }
+
+  bool isValid() const { return Valid; }
+  bool isScalar() const {
+    return (Vscale.hasValue() && Vscale.getValue() == 0);
+  }
+  bool isVector() const {
+    return (Vscale.hasValue() && Vscale.getValue() != 0);
+  }
+  bool isHalfVector() const {
+    return (isVector() && Float && ElementBitwidth == 16);
+  }
+  bool isFloatVector() const {
+    return (isVector() && Float && ElementBitwidth == 32);
+  }
+  bool isDoubleVector() const {
+    return (isVector() && Float && ElementBitwidth == 64);
+  }
+
+private:
+  // Verify the RVV vector type and set Valid.
+  bool verifyType() const;
+
+  // Create a type based on the basic type of the TypeRange.
+  void applyBasicType();
+
+  // Apply a prototype modifier to the current type. The result may be an
+  // invalid type.
+  void applyModifier(StringRef prototype);
+
+  // Compute and record the strings for a legal type.
+  void compute_builtin_str();
+  void compute_clang_builtin_str();
+  void compute_str();
+  void compute_short_str();
+
+  // Compute the vscale regardless of legality.
+  Optional<unsigned> getVscale(unsigned ElementBitwidth, LMULType LMUL);
+};
+
+using RVVTypePtr = std::shared_ptr<RVVType>;
+using RVVTypes = std::vector<RVVTypePtr>;
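+
+// For illustration (not emitted anywhere), the strings computed for one
+// concrete configuration, derived from the encoding rules implemented below.
+// BasicType 'i' with LMUL 2 and the vector transformer "v" describes
+// vint32m2_t, i.e. <vscale x 4 x i32>:
+//
+//   RVVType T('i', 2, "v");
+//   T.builtin_str()       == "q4Si"            // scalable vector of 4 signed ints
+//   T.clang_builtin_str() == "__rvv_int32m2_t" // internal clang builtin type
+//   T.str()               == "vint32m2_t"      // spelling used in riscv_vector.h
+//   T.short_str()         == "i32m2"           // intrinsic name suffix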
+
+// TODO: refactor the Intrinsic class design after supporting all intrinsic
+// combinations.
+class Intrinsic {
+  enum class Extension : uint8_t {
+    Basic = 0,
+    F = 1 << 1,
+    D = 1 << 2,
+    ZFH = 1 << 3
+  };
+
+private:
+  std::string Name; // Builtin name
+  std::string MangledName;
+  std::string IRName;
+  bool HasSideEffects;
+  bool IsMask;
+  bool HasMergeOperand;
+  bool HasVL;
+  bool HasGeneric;
+  RVVTypes Types; // Includes the output type and all input types.
+  std::vector<int64_t> IntrinsicTypes; // Type names in the LLVM IR intrinsic suffix
+  uint8_t Extensions = 0;
+
+public:
+  Intrinsic(StringRef Name, StringRef Suffix, StringRef MangledSuffix,
+            StringRef IRName, bool HasSideEffects, bool IsMask,
+            bool HasMergeOperand, bool HasVL, bool HasGeneric,
+            const RVVTypes &Types, const std::vector<int64_t> &IntrinsicTypes);
+  ~Intrinsic() = default;
+
+  // Return the architecture preprocessor definitions.
+  static SmallVector<std::string, 8> getExtStrings(uint8_t Extensions);
+
+  StringRef getName() const { return Name; }
+  StringRef getMangledName() const { return MangledName; }
+  bool hasSideEffects() const { return HasSideEffects; }
+  bool hasMergeOperand() const { return HasMergeOperand; }
+  bool hasVL() const { return HasVL; }
+  bool hasGeneric() const { return HasGeneric; }
+  size_t getNumOperand() const { return Types.size() - 1; }
+  // Get the output and input types.
+  ArrayRef<RVVTypePtr> getTypes() const { return Types; }
+  ArrayRef<int64_t> getIntrinsicTypes() const { return IntrinsicTypes; }
+  std::string getIRName() const { return IRName; }
+  uint8_t getExtensions() const { return static_cast<uint8_t>(Extensions); }
+
+  // Return the type string for a BUILTIN() macro in Builtins.def.
+  std::string getBuiltinTypeStr() const;
+
+  // Return the code block for the switch body in EmitRISCVBuiltinExpr; it
+  // should initialize the intrinsic ID and IntrinsicTypes.
+  std::string getCodeGenSwitchBody() const;
+
+  // Return the function declaration with the given function name. Arguments
+  // are named arg_1, arg_2, ...
+  std::string getFuncDecl(Twine FuncName) const;
+
+  /// Return the function definition for the generic header and the tests.
+  std::string createFunction(Twine FuncName, Twine CalleeName) const;
+};
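+
+// For illustration, how the two names compose, assuming a record named "vadd"
+// with type suffix "vv_i32m2", a mask variant and a VL operand (see the
+// constructor below):
+//
+//   Name        == "vadd_vv_i32m2_m_vl" // builtin name, fully suffixed
+//   MangledName == "vadd_m_vl"          // overloaded name, type suffix dropped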
+
+using TypeString = std::string;
+class RVVEmitter {
+private:
+  RecordKeeper &Records;
+  // Concat BasicType, LMUL and Proto as the key.
+  StringMap<RVVTypePtr> LegalTypes;
+  StringSet<> IllegalTypes;
+
+public:
+  RVVEmitter(RecordKeeper &R) : Records(R) {}
+
+  /// Emit riscv_vector.h
+  void createHeader(raw_ostream &o);
+
+  /// Emit riscv_vector_generic.h
+  void createGenericHeader(raw_ostream &o);
+
+  /// Emit all the __builtin prototypes and code needed by Sema.
+  void createBuiltins(raw_ostream &o);
+
+  /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+  void createCodeGen(raw_ostream &o);
+
+  /// Emit RISCV Vector tests for Clang.
+  void createTest(raw_ostream &o, bool IsGeneric);
+
+private:
+  /// Create an intrinsic and add it to \p Out
+  void createIntrinsic(Record *R,
+                       SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out);
+  /// Compute output and input types by applying different configs (basic type
+  /// and LMUL with type transformers). It also records the result in the legal
+  /// or illegal set to avoid computing the same config again. The result may
+  /// contain an illegal RVVType.
+  Optional<RVVTypes> computeTypes(BasicType BT, int LMUL,
+                                  ArrayRef<StringRef> PrototypeSeq);
+  Optional<RVVTypePtr> computeType(BasicType BT, int LMUL, StringRef Proto);
+
+  /// Emit arch preprocessor definitions and the guarded body.
+  void
+  dumpArchMacroAndBody(SmallVectorImpl<std::unique_ptr<Intrinsic>> &Defs,
+                       raw_ostream &o,
+                       std::function<void(raw_ostream &, const Intrinsic &)>);
+};
+
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+LMULType::LMULType(int LMUL) {
+  unsigned ULMul = std::abs(LMUL);
+  // Check that LMUL is -8, -4, -2, 1, 2, 4 or 8.
+  assert(ULMul <= 8 && countPopulation(ULMul) == 1 && "Bad LMUL number!");
+  if (LMUL < 0)
+    LMul = 1.f / static_cast<float>(ULMul);
+  else
+    LMul = static_cast<float>(LMUL);
+}
+
+std::string LMULType::str() const {
+  if (LMul < 1.f)
+    return "mf" + utostr(static_cast<unsigned>(1 / LMul));
+  return "m" + utostr(static_cast<unsigned>(LMul));
+}
+
+LMULType &LMULType::operator*=(unsigned RHS) {
+  this->LMul = this->LMul * static_cast<float>(RHS);
+  return *this;
+}
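+
+// For illustration (fractional LMULs are passed in as negative powers of two):
+//
+//   LMULType(-4).str() == "mf4" // LMul == 0.25
+//   LMULType(2).str()  == "m2"  // LMul == 2.0
+//   LMULType L(-2);             // LMul == 0.5
+//   L *= 4;                     // LMul == 2.0, i.e. "m2"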
+
+RVVType::RVVType(BasicType BT, int LMUL, StringRef prototype)
+    : BT(BT), LMUL(LMULType(LMUL)), Float(false), Bool(false), Signed(true),
+      Immediate(false), Void(false), Constant(false), Pointer(false),
+      SIZE_T(false), PtrDiff_T(false), ElementBitwidth(~0U), Vscale(0) {
+  applyBasicType();
+  applyModifier(prototype);
+  Valid = verifyType();
+  if (Valid) {
+    compute_builtin_str();
+    compute_str();
+    if (isVector()) {
+      compute_clang_builtin_str();
+      compute_short_str();
+    }
+  }
+}
+
+// Legal RVV vector type combinations:
+// bool   | nxv1i1 | nxv2i1  | nxv4i1  | nxv8i1  | nxv16i1 | nxv32i1  | nxv64i1
+//
+//        | lmul=⅛ | lmul=¼  | lmul=½  | lmul=1  | lmul=2  | lmul=4   | lmul=8
+// ------ | ------ | ------- | ------- | ------- | ------- | -------- | --------
+// i64    | N/A    | N/A     | N/A     | nxv1i64 | nxv2i64 | nxv4i64  | nxv8i64
+// i32    | N/A    | N/A     | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32  | nxv16i32
+// i16    | N/A    | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16
+// i8     | nxv1i8 | nxv2i8  | nxv4i8  | nxv8i8  | nxv16i8 | nxv32i8  | nxv64i8
+// double | N/A    | N/A     | N/A     | nxv1f64 | nxv2f64 | nxv4f64  | nxv8f64
+// float  | N/A    | N/A     | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32  | nxv16f32
+// half   | N/A    | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16
+bool RVVType::verifyType() const {
+  if (isScalar())
+    return true;
+  if (!Vscale.hasValue())
+    return false;
+  if (Float && ElementBitwidth == 8)
+    return false;
+  unsigned V = Vscale.getValue();
+  switch (ElementBitwidth) {
+  case 1:
+  case 8:
+    // Check that Vscale is 1, 2, 4, 8, 16, 32 or 64.
+    return (V <= 64 && countPopulation(V) == 1);
+  case 16:
+    // Check that Vscale is 1, 2, 4, 8, 16 or 32.
+    return (V <= 32 && countPopulation(V) == 1);
+  case 32:
+    // Check that Vscale is 1, 2, 4, 8 or 16.
+    return (V <= 16 && countPopulation(V) == 1);
+  case 64:
+    // Check that Vscale is 1, 2, 4 or 8.
+    return (V <= 8 && countPopulation(V) == 1);
+  }
+  return false;
+}
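+
+// For illustration, why the N/A entries in the table above are rejected:
+// for basic type 'i' (i32) at LMUL mf4, getVscale(32, 0.25) yields 0.5 < 1
+// and returns None, so verifyType() fails and no vint32mf4_t is emitted.
+// Conversely, 'c' (i8) at LMUL mf8 gives vscale 1, the legal nxv1i8.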
+
+void RVVType::compute_builtin_str() {
+  assert(isValid() && "RVVType is invalid");
+  std::string &S = BuiltinStr;
+  if (Void) {
+    S = "v";
+    return;
+  } else if (SIZE_T) {
+    S = "z";
+    if (Immediate)
+      S = "I" + S;
+    return;
+  } else if (PtrDiff_T) {
+    S = "Y";
+    return;
+  }
+
+  if (!Float) {
+    switch (ElementBitwidth) {
+    case 1:
+      S += "b";
+      break;
+    case 8:
+      S += "c";
+      break;
+    case 16:
+      S += "s";
+      break;
+    case 32:
+      S += "i";
+      break;
+    case 64:
+      S += "Wi";
+      break;
+    default:
+      llvm_unreachable("Unhandled case!");
+    }
+  } else {
+    switch (ElementBitwidth) {
+    case 16:
+      S += "h";
+      break;
+    case 32:
+      S += "f";
+      break;
+    case 64:
+      S += "d";
+      break;
+    default:
+      llvm_unreachable("Unhandled case!");
+    }
+  }
+  if (!Float && !Bool) {
+    if (Signed)
+      S = "S" + S;
+    else
+      S = "U" + S;
+  }
+  if (Immediate) {
+    assert(!Float && "fp immediates are not supported");
+    S = "I" + S;
+  }
+  if (isScalar()) {
+    if (Constant)
+      S += "C";
+    if (Pointer)
+      S += "*";
+    return;
+  }
+  S = "q" + utostr(Vscale.getValue()) + S;
+}
+
+void RVVType::compute_clang_builtin_str() {
+  assert(isValid() && "RVVType is invalid");
+  assert(isVector() && "Handle vector types only");
+
+  std::string &S = ClangBuiltinStr;
+  S += "__rvv_";
+  if (Bool)
+    S += "bool";
+  else if (Float)
+    S += "float";
+  else if (Signed)
+    S += "int";
+  else
+    S += "uint";
+  if (Bool)
+    S += utostr(64 / Vscale.getValue());
+  else
+    S += utostr(ElementBitwidth);
+  if (!Bool) {
+    S += LMUL.str();
+  }
+  S += "_t";
+}
+
+void RVVType::compute_str() {
+  assert(isValid() && "RVVType is invalid");
+  std::string &S = Str;
+  if (Void) {
+    S = "void";
+    return;
+  } else if (SIZE_T) {
+    S = "size_t";
+    return;
+  } else if (PtrDiff_T) {
+    S = "ptrdiff_t";
+    return;
+  }
+  if (Constant)
+    S += "const ";
+  if (isVector())
+    S += "v";
+  if (Bool)
+    S += "bool";
+  else if (Float)
+    S += "float";
+  else if (Signed)
+    S += "int";
+  else
+    S += "uint";
+  /* vbool1_t = MVT::nxv64i1;
+   * vbool2_t = MVT::nxv32i1;
+   * vbool4_t = MVT::nxv16i1;
+   * vbool8_t = MVT::nxv8i1;
+   * vbool16_t = MVT::nxv4i1;
+   * vbool32_t = MVT::nxv2i1;
+   * vbool64_t = MVT::nxv1i1;
+   * special cases for vector bool */
+  if (Bool && isVector())
+    S += utostr(64 / Vscale.getValue());
+  else
+    S += utostr(ElementBitwidth);
+  // Non-bool RVV vector types have an LMUL suffix.
+  if (isVector() && !Bool) {
+    S += LMUL.str();
+  }
+  S += "_t";
+  if (Pointer)
+    S += " *";
+}
+
+void RVVType::compute_short_str() {
+  assert(isVector() && "Only handle vector types");
+  if (Bool) {
+    ShortStr = "b" + utostr(64 / Vscale.getValue());
+    return;
+  }
+  std::string &S = ShortStr;
+  if (Float)
+    S = "f";
+  else if (Signed)
+    S = "i";
+  else
+    S = "u";
+  S += utostr(ElementBitwidth) + LMUL.str();
+}
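+
+// For illustration, the special vector-bool case handled above. BasicType 'c'
+// with LMUL 1 and the mask transformer "m" yields ElementBitwidth 1 and
+// vscale 8, so:
+//
+//   builtin_str()       == "q8b"           // bools carry no sign/LMUL markers
+//   clang_builtin_str() == "__rvv_bool8_t"
+//   str()               == "vbool8_t"      // 64 / vscale == 8
+//   short_str()         == "b8"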
"_vl"; + MangledName += "_vl"; + } + // Init Extensions + for (const auto &T : Types) { + if (T->isHalfVector()) { + Extensions |= static_cast(Extension::ZFH); + } else if (T->isFloatVector()) { + Extensions |= static_cast(Extension::F); + } else if (T->isDoubleVector()) { + Extensions |= static_cast(Extension::D); + } + } +} + +SmallVector Intrinsic::getExtStrings(uint8_t Extents) { + if (Extents == 0) + return {}; + SmallVector ExtVector; + // D imply F + if (Extents & static_cast(Extension::F)) { + ExtVector.emplace_back("__riscv_flen == 32"); + ExtVector.emplace_back("__riscv_f != 0"); + ExtVector.emplace_back("__riscv_flen == 64"); + ExtVector.emplace_back("__riscv_d != 0"); + } + if (Extents & static_cast(Extension::D)) { + ExtVector.emplace_back("__riscv_flen == 64"); + ExtVector.emplace_back("__riscv_d != 0"); + } + if (Extents & static_cast(Extension::ZFH)) { + ExtVector.emplace_back("__riscv_zfh != 0"); + } + return ExtVector; +} + +std::string Intrinsic::getBuiltinTypeStr() const { + std::string S; + for (const auto &T : Types) { + S += T->builtin_str(); + } + return S; +} + +std::string Intrinsic::getCodeGenSwitchBody() const { + + auto getIntrinsicTypesString = + [this](const std::vector &IntrinsicTypes) { + std::string S = " ID = Intrinsic::riscv_" + getIRName() + ";\n"; + + S += " IntrinsicTypes = {"; + for (const auto &Idx : IntrinsicTypes) { + if (Idx == -1) + S += "ResultType"; + else + S += ", Ops[" + utostr(static_cast(Idx)) + "]->getType()"; + } + + // VL could be i64 or i32, need to encode it in IntrinsicTypes. VL is + // always last operand. + if (hasVL()) + S += ", Ops[" + utostr(getNumOperand() - 1) + "]->getType()"; + S += "};\n"; + return S; + }; + + if (!IsMask) { + return getIntrinsicTypesString(getIntrinsicTypes()); + } else { + // IntrinsicTypes is ummasked version index + // we need to update IntrinsicTypes because it does not count the additional + // mask (first operand) and merge operand (second operand) + signed Skew = 1; + if (hasMergeOperand()) + Skew = 2; + std::vector NewIntrinsicTypes = getIntrinsicTypes(); + for (auto &I : NewIntrinsicTypes) { + if (I >= 0) + I += Skew; + } + std::string S = getIntrinsicTypesString(NewIntrinsicTypes); + + // The order of operands is (mask, maskedoff, op1, op2, vl). + // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl) + // or + // The order of operands is (mask, op1, op2, vl). 
+
+std::string Intrinsic::getFuncDecl(Twine FuncName) const {
+  // Index 0 is the output type.
+  std::string S =
+      Twine(Types[0]->str() + Twine(" ") + FuncName + Twine("(")).str();
+
+  auto concat = [&](std::string a, const RVVTypePtr &b) {
+    size_t idx = &b - &Types[0];
+    return std::move(a) + ", " + b->str() + " arg_" + utostr(idx);
+  };
+  // Append the function argument string.
+  if (Types.size() > 1) {
+    std::string Arguments = std::accumulate(
+        Types.begin() + 2, Types.end(), Types[1]->str() + " arg_1", concat);
+    S += Arguments;
+  }
+  S += ")";
+  return S;
+}
+
+std::string Intrinsic::createFunction(Twine FuncName, Twine CalleeName) const {
+  std::string S(getFuncDecl(FuncName));
+  S += " {\n";
+
+  S += Twine("  return " + CalleeName + "(").str();
+  // Append the parameter variables.
+  if (Types.size() > 1) {
+    S += "arg_1";
+    for (unsigned i = 2; i < Types.size(); ++i)
+      S += ", arg_" + utostr(i);
+  }
+  S += ");\n";
+
+  S += "}\n\n";
+  return S;
+}
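For reference, given an intrinsic named vadd_vv_i8m1_vl, createFunction("test_vadd_vv_i8m1_vl", "vadd_vv_i8m1_vl") produces roughly the following C (a sketch; the exact types come from the record's prototype):

    vint8m1_t test_vadd_vv_i8m1_vl(vint8m1_t arg_1, vint8m1_t arg_2,
                                   size_t arg_3) {
      return vadd_vv_i8m1_vl(arg_1, arg_2, arg_3);
    }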
+ OS << "#if (__riscv_zfh != 0)\n"; + for (int LMul : LMUL) { + auto T = computeType('h', LMul, "v"); + if (T.hasValue()) + dumpType(T.getValue()); + } + OS << "#endif\n"; + + // D imply F + OS << "#if (__riscv_flen == 32) || (__riscv_f != 0) || (__riscv_flen == 64) " + "|| (__riscv_d != 0)\n"; + for (int LMul : LMUL) { + auto T = computeType('f', LMul, "v"); + if (T.hasValue()) + dumpType(T.getValue()); + } + OS << "#endif\n"; + + OS << "#if (__riscv_flen == 64) || (__riscv_d != 0)\n"; + for (int LMul : LMUL) { + auto T = computeType('d', LMul, "v"); + if (T.hasValue()) + dumpType(T.getValue()); + } + OS << "#endif\n\n"; + + // Dump intrinsic functions with macro + dumpArchMacroAndBody( + Defs, OS, [this](raw_ostream &OS, const Intrinsic &Inst) { + OS << "// " << Inst.getFuncDelc(Inst.getName()) << "\n"; + OS << "#define " << Inst.getName() << "(...) __builtin_rvv_" + << Inst.getName() << "(__VA_ARGS__)\n"; + }); + + OS << "\n#ifdef __cplusplus\n"; + OS << "}\n"; + OS << "#endif\n"; + OS << "#endif // __riscv_vector\n"; + OS << "#endif // _RISCV_VECTOR_H\n"; +} + +void RVVEmitter::createGenericHeader(raw_ostream &OS) { + + OS << "#include \n\n"; + + // Dump intrinsic functions macro + SmallVector, 512> Defs; + std::vector RV = Records.getAllDerivedDefinitions("RVVBuiltin"); + for (auto *R : RV) + createIntrinsic(R, Defs); + + dumpArchMacroAndBody( + Defs, OS, [this](raw_ostream &OS, const Intrinsic &Inst) { + if (!Inst.hasGeneric()) + return; + OS << StringRef( + "static inline __attribute__((__always_inline__, __nodebug__, " + "__overloadable__))\n"); + OS << Inst.createFunction(Inst.getMangledName(), Inst.getName()); + }); +} + +void RVVEmitter::createBuiltins(raw_ostream &OS) { + SmallVector, 512> Defs; + std::vector RV = Records.getAllDerivedDefinitions("RVVBuiltin"); + for (auto *R : RV) + createIntrinsic(R, Defs); + + OS << "#if defined(BUILTIN) && !defined(RISCVV_BUILTIN)\n"; + OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)\n"; + OS << "#endif\n"; + for (auto &Def : Defs) { + OS << "RISCVV_BUILTIN(" << Def->getName() << ",\"" + << Def->getBuiltinTypeStr() << "\", "; + if (!Def->hasSideEffects()) + OS << "\"n\")\n"; + else + OS << "\"\")\n"; + } + OS << "\n#undef BUILTIN\n"; + OS << "#undef RISCVV_BUILTIN\n"; +} + +void RVVEmitter::createCodeGen(raw_ostream &OS) { + SmallVector, 512> Defs; + std::vector RV = Records.getAllDerivedDefinitions("RVVBuiltin"); + for (auto *R : RV) + createIntrinsic(R, Defs); + + // Same intrinsic name have the same switch body + llvm::StringMap, 128>> DefsSet; + for (auto &def : Defs) { + DefsSet[def->getIRName()].push_back(std::move(def)); + } + for (const auto &KV : DefsSet) { + for (const auto &I : KV.getValue()) { + OS << "case RISCV::BI" << I->getName() << ":\n"; + } + OS << KV.getValue()[0]->getCodeGenSwitchBody() << "\n"; + OS << " break;\n"; + } +} + +void RVVEmitter::createTest(raw_ostream &OS, bool IsGeneric) { + SmallVector, 512> Defs; + std::vector RV = Records.getAllDerivedDefinitions("RVVBuiltin"); + for (auto *R : RV) + createIntrinsic(R, Defs); + + std::string S; + for (auto &Def : Defs) { + // Some intrinsis have no generic functions + if (!Def->hasGeneric() && IsGeneric) + continue; + StringRef Name(IsGeneric ? 
Def->getMangledName() : Def->getName()); + S += Def->createFunction(Twine("test_" + Def->getName()), Name); + } + if (S.empty()) + return; + OS << "// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature " + "+d -target-feature +experimental-v \\\n"; + OS << "// RUN: -target-feature +experimental-zfh -disable-O0-optnone " + "-emit-llvm %s -o - | opt -S -mem2reg | FileCheck " + "--check-prefix=CHECK-RV64 %s\n"; + OS << "// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature " + "+d -target-feature +experimental-v \\\n"; + OS << "// RUN: -target-feature +experimental-zfh -disable-O0-optnone " + "-emit-llvm %s -o - | opt -S -mem2reg | FileCheck " + "--check-prefix=CHECK-RV32 %s\n"; + OS << "// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature " + "+d -target-feature +experimental-v \\\n"; + OS << "// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s " + ">/dev/null 2>%t\n"; + OS << "// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t\n\n"; + + OS << "// NOTE: This file is autogenerated by 'bin/clang-tblgen " + "-gen-riscv-vector-test clang/include/clang/Basic/riscv_vector.td " + "-D='\n\n"; + OS << "// ASM-NOT: warning\n"; + if (IsGeneric) + OS << "#include \n\n"; + else + OS << "#include \n\n"; + OS << S; +} + +void RVVEmitter::createIntrinsic( + Record *R, SmallVectorImpl> &Out) { + StringRef Name = R->getValueAsString("Name"); + StringRef Suffix = R->getValueAsString("Suffix"); + StringRef MangledSuffix = R->getValueAsString("MangledSuffix"); + std::string Prototypes = R->getValueAsString("Prototype").data(); + StringRef TypeRange = R->getValueAsString("TypeRange"); + bool HasMask = R->getValueAsBit("HasMask"); + bool HasMergeOperand = R->getValueAsBit("HasMergeOperand"); + bool HasVL = R->getValueAsBit("HasVL"); + bool HasGeneric = R->getValueAsBit("HasGeneric"); + bool HasSideEffects = R->getValueAsBit("HasSideEffects"); + std::vector LMULList = R->getValueAsListOfInts("LMUL"); + std::vector IntrinsicTypes = + R->getValueAsListOfInts("IntrinsicTypes"); + StringRef IRName = R->getValueAsString("IRName"); + StringRef IRNameMask = R->getValueAsString("IRNameMask"); + + // Parse prototype and create a list of primitve type with transformers + // (operand) in ProtoSeq. ProtoSeq[0] is output operand. + SmallVector ProtoSeq; + const StringRef Pirmaries("evwqom0ztc"); + size_t start = 0; + for (size_t i = 0; i < Prototypes.size(); ++i) { + if (Pirmaries.find(Prototypes[i]) != StringRef::npos) { + ProtoSeq.push_back(Prototypes.substr(start, i - start + 1)); + start = i + 1; + } + } + // if HasVL, append 'z' to last operand + if (HasVL) + ProtoSeq.push_back("z"); + + SmallVector ProtoMaskSeq = ProtoSeq; + if (HasMask) { + // if HasMask, insert 'm' as first input operand + ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m"); + // if HasMergeOperand, insert result type as second input operand + if (HasMergeOperand) + ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 2, ProtoSeq[0]); + } + + // Create Intrinsics for each type and LMUL + for (char I : TypeRange) { + for (int LMUL : LMULList) { + Optional Types = computeTypes(I, LMUL, ProtoSeq); + // Ignored to create new intrinsic if there is any illegal type. + if (!Types.hasValue()) + continue; + + auto SuffixStr = computeType(I, LMUL, Suffix).getValue()->short_str(); + auto MSuffixStr = + MangledSuffix.empty() + ? 
"" + : computeType(I, LMUL, MangledSuffix).getValue()->short_str(); + // Create a non-mask intrinsic + Out.push_back(std::make_unique( + Name, SuffixStr, MSuffixStr, IRName, HasSideEffects, + /*IsMask*/ false, /*HasMergeOperand*/ false, HasVL, HasGeneric, + Types.getValue(), IntrinsicTypes)); + if (HasMask) { + // Create a mask intrinsic + Optional MaskTypes = computeTypes(I, LMUL, ProtoMaskSeq); + Out.push_back(std::make_unique( + Name, SuffixStr, MSuffixStr, IRNameMask, HasSideEffects, + /*IsMask*/ true, HasMergeOperand, HasVL, HasGeneric, + MaskTypes.getValue(), IntrinsicTypes)); + } + } // end for LMUL + } // end for TypeRange +} + +Optional +RVVEmitter::computeTypes(BasicType BT, int LMUL, + ArrayRef PrototypeSeq) { + RVVTypes Types; + for (std::string Proto : PrototypeSeq) { + auto T = computeType(BT, LMUL, Proto); + if (!T.hasValue()) { + return llvm::None; + } + // record legal type index + Types.push_back(T.getValue()); + } + return Optional(Types); +} + +Optional RVVEmitter::computeType(BasicType BT, int LMUL, + StringRef Proto) { + TypeString Idx = Twine(BT + utostr(LMUL) + Proto).str(); + // search first + if (LegalTypes.count(Idx)) { + return Optional(LegalTypes[Idx]); + } else if (IllegalTypes.count(Idx)) { + return llvm::None; + } + // compute type and record the result + auto T = std::make_shared(BT, LMUL, Proto); + if (T->isValid()) { + // record legal type index and value + LegalTypes.insert({Idx, T}); + return Optional(T); + } + // record illegal type index + IllegalTypes.insert(Idx); + return llvm::None; +} + +void RVVEmitter::dumpArchMacroAndBody( + SmallVectorImpl> &Defs, raw_ostream &OS, + std::function DumpBody) { + + // collect same extensions in one set. + DenseMap, 256>> DefsSet; + for (auto &def : Defs) { + DefsSet[def->getExtensions()].push_back(std::move(def)); + } + + for (const auto &KV : DefsSet) { + SmallVector ExtStrings = + Intrinsic::getExtStrings(KV.getFirst()); + // dump arch predecessor definitions + if (ExtStrings.size()) { + std::string ArchMacro = std::accumulate( + ExtStrings.begin() + 1, ExtStrings.end(), "(" + ExtStrings[0] + ")", + [](std::string a, const std::string &b) { + return std::move(a) + " || (" + b + ")"; + }); + OS << "#if " << ArchMacro << "\n"; + } + for (auto &Def : KV.getSecond()) { + DumpBody(OS, *Def); + } + if (ExtStrings.size()) + OS << "#endif\n\n"; + } +} + +namespace clang { +void EmitRVVHeader(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createHeader(OS); +} + +void EmitRVVGenericHeader(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createGenericHeader(OS); +} + +void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createBuiltins(OS); +} + +void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createCodeGen(OS); +} + +void EmitRVVTest(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createTest(OS, /*IsGeneric*/ false); +} + +void EmitRVVGenericTest(RecordKeeper &Records, raw_ostream &OS) { + RVVEmitter(Records).createTest(OS, /*IsGeneric*/ true); +} + +} // End namespace clang diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp --- a/clang/utils/TableGen/TableGen.cpp +++ b/clang/utils/TableGen/TableGen.cpp @@ -83,6 +83,12 @@ GenArmCdeBuiltinSema, GenArmCdeBuiltinCG, GenArmCdeBuiltinAliases, + GenRISCVVectorHeader, + GenRISCVVectorGenericHeader, + GenRISCVVectorBuiltins, + GenRISCVVectorBuiltinCG, + GenRISCVVectorTest, + GenRISCVVectorGenericTest, GenAttrDocs, GenDiagDocs, 
diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
--- a/clang/utils/TableGen/TableGen.cpp
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -83,6 +83,12 @@
   GenArmCdeBuiltinSema,
   GenArmCdeBuiltinCG,
   GenArmCdeBuiltinAliases,
+  GenRISCVVectorHeader,
+  GenRISCVVectorGenericHeader,
+  GenRISCVVectorBuiltins,
+  GenRISCVVectorBuiltinCG,
+  GenRISCVVectorTest,
+  GenRISCVVectorGenericTest,
   GenAttrDocs,
   GenDiagDocs,
   GenOptDocs,
@@ -228,6 +234,19 @@
                "Generate ARM CDE builtin code-generator for clang"),
     clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases",
                "Generate list of valid ARM CDE builtin aliases for clang"),
+    clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header",
+               "Generate riscv_vector.h for clang"),
+    clEnumValN(GenRISCVVectorGenericHeader,
+               "gen-riscv-vector-generic-header",
+               "Generate riscv_vector_generic.h for clang"),
+    clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins",
+               "Generate riscv_vector_builtins.inc for clang"),
+    clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
+               "Generate riscv_vector_builtin_cg.inc for clang"),
+    clEnumValN(GenRISCVVectorTest, "gen-riscv-vector-test",
+               "Generate RISC-V vector tests for clang"),
+    clEnumValN(GenRISCVVectorGenericTest, "gen-riscv-vector-generic-test",
+               "Generate RISC-V vector generic tests for clang"),
     clEnumValN(GenAttrDocs, "gen-attr-docs",
                "Generate attribute documentation"),
     clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -428,6 +447,24 @@
   case GenArmCdeBuiltinAliases:
     EmitCdeBuiltinAliases(Records, OS);
     break;
+  case GenRISCVVectorHeader:
+    EmitRVVHeader(Records, OS);
+    break;
+  case GenRISCVVectorGenericHeader:
+    EmitRVVGenericHeader(Records, OS);
+    break;
+  case GenRISCVVectorBuiltins:
+    EmitRVVBuiltins(Records, OS);
+    break;
+  case GenRISCVVectorBuiltinCG:
+    EmitRVVBuiltinCG(Records, OS);
+    break;
+  case GenRISCVVectorTest:
+    EmitRVVTest(Records, OS);
+    break;
+  case GenRISCVVectorGenericTest:
+    EmitRVVGenericTest(Records, OS);
+    break;
   case GenAttrDocs:
     EmitClangAttrDocs(Records, OS);
     break;
diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h
--- a/clang/utils/TableGen/TableGenBackends.h
+++ b/clang/utils/TableGen/TableGenBackends.h
@@ -106,6 +106,13 @@
 void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 
+void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVGenericHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVTest(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVGenericTest(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
 void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
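With the TableGen driver changes above, the new backends can also be exercised by hand; the helper script added below wraps invocations like the following (the build-directory path is an assumption for illustration):

    build/bin/clang-tblgen -gen-riscv-vector-header \
        clang/include/clang/Basic/riscv_vector.td -o riscv_vector.h
    build/bin/clang-tblgen -gen-riscv-vector-builtins \
        clang/include/clang/Basic/riscv_vector.td -o riscv_vector_builtins.inc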
diff --git a/clang/utils/TestUtils/gen-riscv-v-tests.sh b/clang/utils/TestUtils/gen-riscv-v-tests.sh
new file mode 100755
--- /dev/null
+++ b/clang/utils/TestUtils/gen-riscv-v-tests.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Generate RISC-V vector intrinsic tests in clang/test/CodeGen/RISCV/riscv-rvv-intrinsics
+# and clang/test/CodeGen/RISCV/riscv-rvv-intrinsics-generic
+#
+# Usage: gen-riscv-v-tests.sh path/to/llvm/bin
+#
+# 1. Use 'clang-tblgen' to read clang/include/clang/Basic/riscv_vector.td
+#    and generate the tests.
+# 2. Use update_cc_test_checks.py to update the expected results.
+
+if [ "$#" -ne 1 ]; then
+  echo "Usage: ./gen-riscv-v-tests.sh path/to/llvm/bin"
+  exit 1
+fi
+
+src_path="$(dirname $(realpath $0))/../../../"
+bin_path=$1
+
+gen_tests(){
+  # op_list holds the macro names used in riscv_vector.td.
+  local op_list="VADD VFADD"
+  local option="$1"
+  local suffix="$2"
+  local path="${src_path}/clang/test/CodeGen/RISCV/riscv-rvv-intrinsics${suffix}"
+  if [[ ! -d $path ]]; then
+    mkdir -p "$path"
+  fi
+  for op in ${op_list}; do
+    local file=${path}/${op,,}.c
+    ${bin_path}/clang-tblgen $option ${src_path}/clang/include/clang/Basic/riscv_vector.td -o=${file} --write-if-changed -D=${op}
+    if [ -s ${file} ]; then
+      ${src_path}/llvm/utils/update_cc_test_checks.py --llvm-bin=${bin_path} ${file}
+    else
+      rm ${file}
+    fi
+  done
+}
+
+gen_tests "-gen-riscv-vector-test" ""
+gen_tests "-gen-riscv-vector-generic-test" "-generic"
diff --git a/llvm/docs/CommandGuide/tblgen.rst b/llvm/docs/CommandGuide/tblgen.rst
--- a/llvm/docs/CommandGuide/tblgen.rst
+++ b/llvm/docs/CommandGuide/tblgen.rst
@@ -541,6 +541,30 @@
 
   Generate list of valid ARM CDE builtin aliases for Clang.
 
+.. option:: -gen-riscv-vector-header
+
+  Generate ``riscv_vector.h`` for Clang.
+
+.. option:: -gen-riscv-vector-generic-header
+
+  Generate ``riscv_vector_generic.h`` for Clang.
+
+.. option:: -gen-riscv-vector-builtins
+
+  Generate ``riscv_vector_builtins.inc`` for Clang.
+
+.. option:: -gen-riscv-vector-builtin-codegen
+
+  Generate ``riscv_vector_builtin_cg.inc`` for Clang.
+
+.. option:: -gen-riscv-vector-test
+
+  Generate RISC-V vector tests for Clang.
+
+.. option:: -gen-riscv-vector-generic-test
+
+  Generate RISC-V vector generic tests for Clang.
+
 .. option:: -gen-attr-docs
 
   Generate attribute documentation.